Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull i915 drm fixes from Dave Airlie:
 "Jani sent a bunch of i915 display fixes as my weekend started, but
  hopefully you can fit them in"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/i915: fix error path in intel_setup_gmbus()
  drm/i915/skl: Fix typo in DPLL_CFGCR1 definition
  drm/i915/skl: Don't skip mst encoders in skl_ddi_pll_select()
  drm/i915: Pretend cursor is always on for ILK-style WM calculations (v2)
  drm/i915/dp: reduce missing TPS3 support errors to debug logging
  drm/i915/dp: abstract training pattern selection
  drm/i915/dsi: skip gpio element execution when not supported
  drm/i915/dsi: don't pass arbitrary data to sideband
  drm/i915/dsi: defend gpio table against out of bounds access
  drm/i915/bxt: Don't save/restore eDP panel power during suspend (v3)
  drm/i915: Allow i915_gem_object_get_page() on userptr as well

commit 631c0e84d9
Author: Linus Torvalds
Date:   2016-02-14 18:34:12 -08:00

10 changed files with 77 additions and 28 deletions

@@ -1988,6 +1988,9 @@ enum hdmi_force_audio {
 #define I915_GTT_OFFSET_NONE ((u32)-1)
 
 struct drm_i915_gem_object_ops {
+	unsigned int flags;
+#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
+
 	/* Interface between the GEM object and its backing storage.
 	 * get_pages() is called once prior to the use of the associated set
 	 * of pages before to binding them into the GTT, and put_pages() is
@@ -2003,6 +2006,7 @@ struct drm_i915_gem_object_ops {
 	 */
 	int (*get_pages)(struct drm_i915_gem_object *);
 	void (*put_pages)(struct drm_i915_gem_object *);
+
 	int (*dmabuf_export)(struct drm_i915_gem_object *);
 	void (*release)(struct drm_i915_gem_object *);
 };

@@ -4425,6 +4425,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
 	.get_pages = i915_gem_object_get_pages_gtt,
 	.put_pages = i915_gem_object_put_pages_gtt,
 };
@@ -5261,7 +5262,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
 	struct page *page;
 
 	/* Only default objects have per-page dirty tracking */
-	if (WARN_ON(obj->ops != &i915_gem_object_ops))
+	if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
 		return NULL;
 
 	page = i915_gem_object_get_page(obj, n);

@@ -789,9 +789,10 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
-	.dmabuf_export = i915_gem_userptr_dmabuf_export,
+	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
 	.get_pages = i915_gem_userptr_get_pages,
 	.put_pages = i915_gem_userptr_put_pages,
+	.dmabuf_export = i915_gem_userptr_dmabuf_export,
 	.release = i915_gem_userptr_release,
 };

@@ -7514,7 +7514,7 @@ enum skl_disp_power_wells {
 #define DPLL_CFGCR2_PDIV_7 (4<<2)
 #define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
 
-#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR2)
+#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
 #define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
 
 /* BXT display engine PLL */
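
Why the one-character fix above only affects the second DPLL: the _MMIO_PIPE()-style helpers in this register-macro family pick between two offsets by linear interpolation on the index, roughly reg = a + index * (b - a), so index 0 (SKL_DPLL1) always resolves to the first offset and only index 1 (SKL_DPLL2) was misdirected to the CFGCR2 offset. A minimal standalone sketch of that selection pattern (simplified, with made-up offset values; not the actual i915_reg.h definitions):

#include <assert.h>
#include <stdint.h>

/* Illustrative stand-ins for the per-DPLL register offsets (values made up). */
#define DPLL1_CFGCR1 0x6c040
#define DPLL2_CFGCR1 0x6c048
#define DPLL2_CFGCR2 0x6c04c

/* Two-way selection in the same shape as _PIPE(index, a, b): 0 -> a, 1 -> b. */
static uint32_t pick2(unsigned int index, uint32_t a, uint32_t b)
{
	return a + index * (b - a);
}

int main(void)
{
	/* With the typo, DPLL2 (index 1) landed on its CFGCR2 offset. */
	assert(pick2(1, DPLL1_CFGCR1, DPLL2_CFGCR2) == DPLL2_CFGCR2);

	/* Fixed: DPLL2 resolves to its CFGCR1; DPLL1 (index 0) is unaffected. */
	assert(pick2(1, DPLL1_CFGCR1, DPLL2_CFGCR1) == DPLL2_CFGCR1);
	assert(pick2(0, DPLL1_CFGCR1, DPLL2_CFGCR1) == DPLL1_CFGCR1);

	return 0;
}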

@@ -49,7 +49,7 @@ static void i915_save_display(struct drm_device *dev)
 		dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
 		dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
 		dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
-	} else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+	} else if (INTEL_INFO(dev)->gen <= 4) {
 		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
 		dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
 		dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
@@ -84,7 +84,7 @@ static void i915_restore_display(struct drm_device *dev)
 		I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
 		I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
 		I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
-	} else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+	} else if (INTEL_INFO(dev)->gen <= 4) {
 		I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
 		I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
 		I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);

@@ -1589,7 +1589,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
 			 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
 			 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
 			 wrpll_params.central_freq;
-	} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+	} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+		   intel_encoder->type == INTEL_OUTPUT_DP_MST) {
 		switch (crtc_state->port_clock / 2) {
 		case 81000:
 			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);

@@ -215,27 +215,46 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 	}
 }
 
+/*
+ * Pick training pattern for channel equalization. Training Pattern 3 for HBR2
+ * or 1.2 devices that support it, Training Pattern 2 otherwise.
+ */
+static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
+{
+	u32 training_pattern = DP_TRAINING_PATTERN_2;
+	bool source_tps3, sink_tps3;
+
+	/*
+	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
+	 * also mandatory for downstream devices that support HBR2. However, not
+	 * all sinks follow the spec.
+	 *
+	 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
+	 * supported in source but still not enabled.
+	 */
+	source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
+	sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
+
+	if (source_tps3 && sink_tps3) {
+		training_pattern = DP_TRAINING_PATTERN_3;
+	} else if (intel_dp->link_rate == 540000) {
+		if (!source_tps3)
+			DRM_DEBUG_KMS("5.4 Gbps link rate without source HBR2/TPS3 support\n");
+		if (!sink_tps3)
+			DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n");
+	}
+
+	return training_pattern;
+}
+
 static void
 intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 {
 	bool channel_eq = false;
 	int tries, cr_tries;
-	uint32_t training_pattern = DP_TRAINING_PATTERN_2;
+	u32 training_pattern;
 
-	/*
-	 * Training Pattern 3 for HBR2 or 1.2 devices that support it.
-	 *
-	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
-	 * also mandatory for downstream devices that support HBR2.
-	 *
-	 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
-	 * supported but still not enabled.
-	 */
-	if (intel_dp_source_supports_hbr2(intel_dp) &&
-	    drm_dp_tps3_supported(intel_dp->dpcd))
-		training_pattern = DP_TRAINING_PATTERN_3;
-	else if (intel_dp->link_rate == 540000)
-		DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
+	training_pattern = intel_dp_training_pattern(intel_dp);
 
 	/* channel equalization */
 	if (!intel_dp_set_link_train(intel_dp,

@@ -204,10 +204,28 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 	struct drm_device *dev = intel_dsi->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	if (dev_priv->vbt.dsi.seq_version >= 3)
+		data++;
+
 	gpio = *data++;
 
 	/* pull up/down */
-	action = *data++;
+	action = *data++ & 1;
+
+	if (gpio >= ARRAY_SIZE(gtable)) {
+		DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
+		goto out;
+	}
+
+	if (!IS_VALLEYVIEW(dev_priv)) {
+		DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
+		goto out;
+	}
+
+	if (dev_priv->vbt.dsi.seq_version >= 3) {
+		DRM_DEBUG_KMS("GPIO element v3 not supported\n");
+		goto out;
+	}
 
 	function = gtable[gpio].function_reg;
 	pad = gtable[gpio].pad_reg;
@@ -226,6 +244,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 	vlv_gpio_nc_write(dev_priv, pad, val);
 	mutex_unlock(&dev_priv->sb_lock);
 
+out:
 	return data;
 }

@@ -683,7 +683,7 @@ int intel_setup_gmbus(struct drm_device *dev)
 	return 0;
 
 err:
-	while (--pin) {
+	while (pin--) {
 		if (!intel_gmbus_is_valid_pin(dev_priv, pin))
 			continue;
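
The error-path change above is subtle: with the original pre-decrement, the cleanup loop never unwinds pin 0, and if the failure happened at pin 0 an unsigned counter would wrap around instead of terminating. A minimal standalone sketch of the two loop shapes (illustrative C only; teardown() and the index values are made up, this is not the driver code):

#include <stdio.h>

/* Stand-in for the per-pin teardown done in the real error path. */
static void teardown(unsigned int pin)
{
	printf("teardown pin %u\n", pin);
}

int main(void)
{
	unsigned int pin;

	/* Suppose setup failed at index 3, so indices 0..2 need unwinding. */

	/* Pre-decrement tests the already-decremented value: only 2 and 1 are
	 * visited, index 0 is leaked, and a failure at index 0 would wrap the
	 * unsigned counter instead of skipping the loop. */
	for (pin = 3; --pin; )
		printf("pre-decrement visits %u\n", pin);

	/* Post-decrement tests first, then decrements: 2, 1 and 0 are all
	 * visited, and a failure at index 0 simply skips the loop. */
	for (pin = 3; pin--; )
		teardown(pin);

	return 0;
}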

@@ -1783,16 +1783,20 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
 				   const struct intel_plane_state *pstate,
 				   uint32_t mem_value)
 {
-	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+	/*
+	 * We treat the cursor plane as always-on for the purposes of watermark
+	 * calculation. Until we have two-stage watermark programming merged,
+	 * this is necessary to avoid flickering.
+	 */
+	int cpp = 4;
+	int width = pstate->visible ? pstate->base.crtc_w : 64;
 
-	if (!cstate->base.active || !pstate->visible)
+	if (!cstate->base.active)
 		return 0;
 
 	return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
 			      cstate->base.adjusted_mode.crtc_htotal,
-			      drm_rect_width(&pstate->dst),
-			      bpp,
-			      mem_value);
+			      width, cpp, mem_value);
 }
 
 /* Only for WM_LP. */