Merge tag 'drm-intel-next-2015-11-20-merged' of git://anongit.freedesktop.org/drm-intel into drm-next
drm-intel-next-2015-11-20-rebased: 4 weeks because of my vacation, so a bit more:
- final bits of the typesafe register mmio functions (Ville)
- power domain fix for hdmi detection (Imre)
- tons of fixes and improvements to the psr code (Rodrigo)
- refactoring of the dp detection code (Ander)
- complete rework of the dmc loader and dc5/dc6 handling (Imre, Patrik and others)
- dp compliance improvements from Shubhangi Shrivastava
- stop_machine hack from Chris to fix corruptions when updating GTT ptes on bsw
- lots of fifo underrun fixes from Ville
- big pile of fbc fixes and improvements from Paulo
- fix fbdev failures paths (Tvrtko and Lukas Wunner)
- dp link training refactoring (Ander)
- interruptible prepare_plane for atomic (Maarten)
- basic kabylake support (Deepak&Rodrigo)
- don't leak ringspace on resets (Chris)

drm-intel-next-2015-10-23:
- 2nd attempt at atomic watermarks from Matt, but just prep for now
- fixes all over

* tag 'drm-intel-next-2015-11-20-merged' of git://anongit.freedesktop.org/drm-intel: (209 commits)
  drm/i915: Update DRIVER_DATE to 20151120
  drm/i915: take a power domain reference while checking the HDMI live status
  drm/i915: take a power domain ref only when needed during HDMI detect
  drm/i915: Tear down fbdev if initialization fails
  async: export current_is_async()
  Revert "drm/i915: Initialize HWS page address after GPU reset"
  drm/i915: Fix oops caused by fbdev initialization failure
  drm/i915: Fix i915_ggtt_view_equal to handle rotation correctly
  drm/i915: Stuff rotation params into view union
  drm/i915: Drop return value from intel_fill_fb_ggtt_view
  drm/i915 : Fix to remove unnecsessary checks in postclose function.
  drm/i915: add MISSING_CASE to a few port/aux power domain helpers
  drm/i915/ddi: fix intel_display_port_aux_power_domain() after HDMI detect
  drm/i915: Remove platform specific *_dp_detect() functions
  drm/i915: Don't do edp panel detection in g4x_dp_detect()
  drm/i915: Send TP1 TP2/3 even when panel claims no NO_TRAIN_ON_EXIT.
  drm/i915: PSR: Don't Skip aux handshake on DP_PSR_NO_TRAIN_ON_EXIT.
  drm/i915: Reduce PSR re-activation time for VLV/CHV.
  drm/i915: Delay first PSR activation.
  drm/i915: Type safe register read/write
  ...
commit 80d69009ef
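The headline item in this merge is the completion of the typesafe register mmio work: bare u32 register offsets become a distinct wrapper type, so the compiler rejects code that confuses an offset with a value, which is what the repeated u32 -> i915_reg_t conversions in the hunks below enforce. As a minimal sketch of the pattern (the i915_reg_t and i915_mmio_reg_offset() names come from the diff itself; the _MMIO() constructor is assumed here, since the real definitions live in i915_reg.h, which this excerpt does not include):

	/* Condensed sketch of the typesafe register pattern; the actual
	 * definitions are in drivers/gpu/drm/i915/i915_reg.h (not shown). */
	typedef struct {
		u32 reg;	/* raw mmio offset, hidden behind a struct so
				 * a plain u32 no longer converts implicitly */
	} i915_reg_t;

	/* assumed constructor name, used when declaring registers */
	#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

	static inline u32 i915_mmio_reg_offset(i915_reg_t reg)
	{
		return reg.reg;	/* explicit unwrap at the mmio boundary */
	}

With the wrapper in place, I915_READ(GEN6_RP_CONTROL) still compiles as before, but passing a bare offset where a register is expected (or vice versa) becomes a type error; the __raw_read()/__raw_write() helpers added to i915_drv.h below unwrap the offset in exactly one place.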
@@ -4177,17 +4177,21 @@ int num_ioctls;</synopsis>
       </sect2>
     </sect1>
     <sect1>
-      <title>GuC-based Command Submission</title>
+      <title>GuC</title>
       <sect2>
-        <title>GuC</title>
+        <title>GuC-specific firmware loader</title>
 !Pdrivers/gpu/drm/i915/intel_guc_loader.c GuC-specific firmware loader
 !Idrivers/gpu/drm/i915/intel_guc_loader.c
       </sect2>
       <sect2>
-        <title>GuC Client</title>
-!Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submissison
+        <title>GuC-based command submission</title>
+!Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submission
 !Idrivers/gpu/drm/i915/i915_guc_submission.c
       </sect2>
+      <sect2>
+        <title>GuC Firmware Layout</title>
+!Pdrivers/gpu/drm/i915/intel_guc_fwif.h GuC Firmware Layout
+      </sect2>
     </sect1>
 
     <sect1>
@@ -547,6 +547,7 @@ static const struct pci_device_id intel_stolen_ids[] __initconst = {
 	INTEL_CHV_IDS(&chv_stolen_funcs),
 	INTEL_SKL_IDS(&gen9_stolen_funcs),
 	INTEL_BXT_IDS(&gen9_stolen_funcs),
+	INTEL_KBL_IDS(&gen9_stolen_funcs),
 };
 
 static void __init intel_graphics_stolen(int num, int slot, int func)
@@ -10,6 +10,7 @@ config DRM_I915
 	# the shmem_readpage() which depends upon tmpfs
 	select SHMEM
 	select TMPFS
+	select STOP_MACHINE
 	select DRM_KMS_HELPER
 	select DRM_PANEL
 	select DRM_MIPI_DSI
@@ -77,6 +77,7 @@ i915-y += dvo_ch7017.o \
 	  dvo_tfp410.o \
 	  intel_crt.o \
 	  intel_ddi.o \
+	  intel_dp_link_training.o \
 	  intel_dp_mst.o \
 	  intel_dp.o \
 	  intel_dsi.o \
@@ -32,7 +32,8 @@ struct intel_dvo_device {
 	const char *name;
 	int type;
 	/* DVOA/B/C output register */
-	u32 dvo_reg;
+	i915_reg_t dvo_reg;
+	i915_reg_t dvo_srcdim_reg;
 	/* GPIO register used for i2c bus to control this device */
 	u32 gpio;
 	int slave_addr;
@@ -407,14 +407,14 @@ static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
  * LRI.
  */
 struct drm_i915_reg_descriptor {
-	u32 addr;
+	i915_reg_t addr;
 	u32 mask;
 	u32 value;
 };
 
 /* Convenience macro for adding 32-bit registers. */
-#define REG32(address, ...) \
-	{ .addr = address, __VA_ARGS__ }
+#define REG32(_reg, ...) \
+	{ .addr = (_reg), __VA_ARGS__ }
 
 /*
  * Convenience macro for adding 64-bit registers.

@@ -423,8 +423,13 @@ struct drm_i915_reg_descriptor {
  * access commands only allow 32-bit accesses. Hence, we have to include
  * entries for both halves of the 64-bit registers.
  */
-#define REG64(addr) \
-	REG32(addr), REG32(addr + sizeof(u32))
+#define REG64(_reg) \
+	{ .addr = _reg }, \
+	{ .addr = _reg ## _UDW }
+
+#define REG64_IDX(_reg, idx) \
+	{ .addr = _reg(idx) }, \
+	{ .addr = _reg ## _UDW(idx) }
 
 static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
 	REG64(GPGPU_THREADS_DISPATCHED),

@@ -451,14 +456,14 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
 	REG32(GEN7_GPGPU_DISPATCHDIMX),
 	REG32(GEN7_GPGPU_DISPATCHDIMY),
 	REG32(GEN7_GPGPU_DISPATCHDIMZ),
-	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
-	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
-	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
-	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)),
-	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)),
-	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)),
-	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)),
-	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)),
+	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0),
+	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 1),
+	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 2),
+	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 3),
+	REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 0),
+	REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 1),
+	REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 2),
+	REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 3),
 	REG32(GEN7_SO_WRITE_OFFSET(0)),
 	REG32(GEN7_SO_WRITE_OFFSET(1)),
 	REG32(GEN7_SO_WRITE_OFFSET(2)),

@@ -592,7 +597,7 @@ static bool check_sorted(int ring_id,
 	bool ret = true;
 
 	for (i = 0; i < reg_count; i++) {
-		u32 curr = reg_table[i].addr;
+		u32 curr = i915_mmio_reg_offset(reg_table[i].addr);
 
 		if (curr < previous) {
 			DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",

@@ -847,7 +852,7 @@ find_reg(const struct drm_i915_reg_descriptor *table,
 	int i;
 
 	for (i = 0; i < count; i++) {
-		if (table[i].addr == addr)
+		if (i915_mmio_reg_offset(table[i].addr) == addr)
 			return &table[i];
 	}
 }

@@ -1023,7 +1028,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 		 * to the register. Hence, limit OACONTROL writes to
 		 * only MI_LOAD_REGISTER_IMM commands.
 		 */
-		if (reg_addr == OACONTROL) {
+		if (reg_addr == i915_mmio_reg_offset(OACONTROL)) {
 			if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
 				DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
 				return false;
@@ -1252,18 +1252,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 
 		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
 			    rp_state_cap >> 16) & 0xff;
-		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
+		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+			     GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 
 		max_freq = (rp_state_cap & 0xff00) >> 8;
-		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
+		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+			     GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 
 		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
 			    rp_state_cap >> 0) & 0xff;
-		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
+		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+			     GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 		seq_printf(m, "Max overclocked frequency: %dMHz\n",

@@ -1523,7 +1526,7 @@ static int gen6_drpc_info(struct seq_file *m)
 		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
 	}
 
-	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
+	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
 	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
 
 	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);

@@ -1640,7 +1643,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 		seq_puts(m, "FBC enabled\n");
 	else
 		seq_printf(m, "FBC disabled: %s\n",
-			   intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason));
+			   dev_priv->fbc.no_fbc_reason);
 
 	if (INTEL_INFO(dev_priv)->gen >= 7)
 		seq_printf(m, "Compressing: %s\n",

@@ -1801,7 +1804,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 	if (ret)
 		goto out;
 
-	if (IS_SKYLAKE(dev)) {
+	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
 		/* Convert GT frequency to 50 HZ units */
 		min_gpu_freq =
 			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;

@@ -1821,7 +1824,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 				       &ia_freq);
 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
 			   intel_gpu_freq(dev_priv, (gpu_freq *
-				(IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1))),
+				(IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+				 GEN9_FREQ_SCALER : 1))),
 			   ((ia_freq >> 0) & 0xff) * 100,
 			   ((ia_freq >> 8) & 0xff) * 100);
 	}

@@ -1873,17 +1877,19 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	ifbdev = dev_priv->fbdev;
-	fb = to_intel_framebuffer(ifbdev->helper.fb);
+	if (ifbdev) {
+		fb = to_intel_framebuffer(ifbdev->helper.fb);
 
-	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
-		   fb->base.width,
-		   fb->base.height,
-		   fb->base.depth,
-		   fb->base.bits_per_pixel,
-		   fb->base.modifier[0],
-		   atomic_read(&fb->base.refcount.refcount));
-	describe_obj(m, fb->obj);
-	seq_putc(m, '\n');
+		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+			   fb->base.width,
+			   fb->base.height,
+			   fb->base.depth,
+			   fb->base.bits_per_pixel,
+			   fb->base.modifier[0],
+			   atomic_read(&fb->base.refcount.refcount));
+		describe_obj(m, fb->obj);
+		seq_putc(m, '\n');
+	}
 #endif
 
 	mutex_lock(&dev->mode_config.fb_lock);

@@ -2402,6 +2408,12 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
 		guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
 	seq_printf(m, "\tversion found: %d.%d\n",
 		guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
+	seq_printf(m, "\theader: offset is %d; size = %d\n",
+		guc_fw->header_offset, guc_fw->header_size);
+	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
+		guc_fw->ucode_offset, guc_fw->ucode_size);
+	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
+		guc_fw->rsa_offset, guc_fw->rsa_size);
 
 	tmp = I915_READ(GUC_STATUS);
 

@@ -2550,7 +2562,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 		   yesno(work_busy(&dev_priv->psr.work.work)));
 
 	if (HAS_DDI(dev))
-		enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
 	else {
 		for_each_pipe(dev_priv, pipe) {
 			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &

@@ -2572,7 +2584,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 
 	/* CHV PSR has no kind of performance counter */
 	if (HAS_DDI(dev)) {
-		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
+		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
 			EDP_PSR_PERF_CNT_MASK;
 
 		seq_printf(m, "Performance_Counter: %u\n", psrperf);

@@ -2696,24 +2708,16 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
 		return "TRANSCODER_C";
 	case POWER_DOMAIN_TRANSCODER_EDP:
 		return "TRANSCODER_EDP";
-	case POWER_DOMAIN_PORT_DDI_A_2_LANES:
-		return "PORT_DDI_A_2_LANES";
-	case POWER_DOMAIN_PORT_DDI_A_4_LANES:
-		return "PORT_DDI_A_4_LANES";
-	case POWER_DOMAIN_PORT_DDI_B_2_LANES:
-		return "PORT_DDI_B_2_LANES";
-	case POWER_DOMAIN_PORT_DDI_B_4_LANES:
-		return "PORT_DDI_B_4_LANES";
-	case POWER_DOMAIN_PORT_DDI_C_2_LANES:
-		return "PORT_DDI_C_2_LANES";
-	case POWER_DOMAIN_PORT_DDI_C_4_LANES:
-		return "PORT_DDI_C_4_LANES";
-	case POWER_DOMAIN_PORT_DDI_D_2_LANES:
-		return "PORT_DDI_D_2_LANES";
-	case POWER_DOMAIN_PORT_DDI_D_4_LANES:
-		return "PORT_DDI_D_4_LANES";
-	case POWER_DOMAIN_PORT_DDI_E_2_LANES:
-		return "PORT_DDI_E_2_LANES";
+	case POWER_DOMAIN_PORT_DDI_A_LANES:
+		return "PORT_DDI_A_LANES";
+	case POWER_DOMAIN_PORT_DDI_B_LANES:
+		return "PORT_DDI_B_LANES";
+	case POWER_DOMAIN_PORT_DDI_C_LANES:
+		return "PORT_DDI_C_LANES";
+	case POWER_DOMAIN_PORT_DDI_D_LANES:
+		return "PORT_DDI_D_LANES";
+	case POWER_DOMAIN_PORT_DDI_E_LANES:
+		return "PORT_DDI_E_LANES";
 	case POWER_DOMAIN_PORT_DSI:
 		return "PORT_DSI";
 	case POWER_DOMAIN_PORT_CRT:

@@ -2734,6 +2738,10 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
 		return "AUX_C";
 	case POWER_DOMAIN_AUX_D:
 		return "AUX_D";
+	case POWER_DOMAIN_GMBUS:
+		return "GMBUS";
+	case POWER_DOMAIN_MODESET:
+		return "MODESET";
 	case POWER_DOMAIN_INIT:
 		return "INIT";
 	default:

@@ -2777,6 +2785,51 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_dmc_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_csr *csr;
+
+	if (!HAS_CSR(dev)) {
+		seq_puts(m, "not supported\n");
+		return 0;
+	}
+
+	csr = &dev_priv->csr;
+
+	intel_runtime_pm_get(dev_priv);
+
+	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
+	seq_printf(m, "path: %s\n", csr->fw_path);
+
+	if (!csr->dmc_payload)
+		goto out;
+
+	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
+		   CSR_VERSION_MINOR(csr->version));
+
+	if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) {
+		seq_printf(m, "DC3 -> DC5 count: %d\n",
+			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
+		seq_printf(m, "DC5 -> DC6 count: %d\n",
+			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
+	} else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) {
+		seq_printf(m, "DC3 -> DC5 count: %d\n",
+			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
+	}
+
+out:
+	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
+	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
+	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
+
+	intel_runtime_pm_put(dev_priv);
+
+	return 0;
+}
+
 static void intel_seq_print_mode(struct seq_file *m, int tabs,
 				 struct drm_display_mode *mode)
 {

@@ -2944,6 +2997,107 @@ static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
 	return cursor_active(dev, pipe);
 }
 
+static const char *plane_type(enum drm_plane_type type)
+{
+	switch (type) {
+	case DRM_PLANE_TYPE_OVERLAY:
+		return "OVL";
+	case DRM_PLANE_TYPE_PRIMARY:
+		return "PRI";
+	case DRM_PLANE_TYPE_CURSOR:
+		return "CUR";
+	/*
+	 * Deliberately omitting default: to generate compiler warnings
+	 * when a new drm_plane_type gets added.
+	 */
+	}
+
+	return "unknown";
+}
+
+static const char *plane_rotation(unsigned int rotation)
+{
+	static char buf[48];
+	/*
+	 * According to doc only one DRM_ROTATE_ is allowed but this
+	 * will print them all to visualize if the values are misused
+	 */
+	snprintf(buf, sizeof(buf),
+		 "%s%s%s%s%s%s(0x%08x)",
+		 (rotation & BIT(DRM_ROTATE_0)) ? "0 " : "",
+		 (rotation & BIT(DRM_ROTATE_90)) ? "90 " : "",
+		 (rotation & BIT(DRM_ROTATE_180)) ? "180 " : "",
+		 (rotation & BIT(DRM_ROTATE_270)) ? "270 " : "",
+		 (rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "",
+		 (rotation & BIT(DRM_REFLECT_Y)) ? "FLIPY " : "",
+		 rotation);
+
+	return buf;
+}
+
+static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct intel_plane *intel_plane;
+
+	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+		struct drm_plane_state *state;
+		struct drm_plane *plane = &intel_plane->base;
+
+		if (!plane->state) {
+			seq_puts(m, "plane->state is NULL!\n");
+			continue;
+		}
+
+		state = plane->state;
+
+		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
+			   plane->base.id,
+			   plane_type(intel_plane->base.type),
+			   state->crtc_x, state->crtc_y,
+			   state->crtc_w, state->crtc_h,
+			   (state->src_x >> 16),
+			   ((state->src_x & 0xffff) * 15625) >> 10,
+			   (state->src_y >> 16),
+			   ((state->src_y & 0xffff) * 15625) >> 10,
+			   (state->src_w >> 16),
+			   ((state->src_w & 0xffff) * 15625) >> 10,
+			   (state->src_h >> 16),
+			   ((state->src_h & 0xffff) * 15625) >> 10,
+			   state->fb ? drm_get_format_name(state->fb->pixel_format) : "N/A",
+			   plane_rotation(state->rotation));
+	}
+}
+
+static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
+{
+	struct intel_crtc_state *pipe_config;
+	int num_scalers = intel_crtc->num_scalers;
+	int i;
+
+	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
+
+	/* Not all platformas have a scaler */
+	if (num_scalers) {
+		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
+			   num_scalers,
+			   pipe_config->scaler_state.scaler_users,
+			   pipe_config->scaler_state.scaler_id);
+
+		for (i = 0; i < SKL_NUM_SCALERS; i++) {
+			struct intel_scaler *sc =
+					&pipe_config->scaler_state.scalers[i];
+
+			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
+				   i, yesno(sc->in_use), sc->mode);
+		}
+		seq_puts(m, "\n");
+	} else {
+		seq_puts(m, "\tNo scalers available on this platform\n");
+	}
+}
+
 static int i915_display_info(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;

@@ -2963,10 +3117,12 @@ static int i915_display_info(struct seq_file *m, void *unused)
 
 		pipe_config = to_intel_crtc_state(crtc->base.state);
 
-		seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
+		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
 			   crtc->base.base.id, pipe_name(crtc->pipe),
 			   yesno(pipe_config->base.active),
-			   pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
+			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
 
 		if (pipe_config->base.active) {
 			intel_crtc_info(m, crtc);

@@ -2976,6 +3132,8 @@ static int i915_display_info(struct seq_file *m, void *unused)
 				   x, y, crtc->base.cursor->state->crtc_w,
 				   crtc->base.cursor->state->crtc_h,
 				   crtc->cursor_addr, yesno(active));
+			intel_scaler_info(m, crtc);
+			intel_plane_info(m, crtc);
 		}
 
 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",

@@ -3110,7 +3268,8 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 
 	seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
 	for (i = 0; i < dev_priv->workarounds.count; ++i) {
-		u32 addr, mask, value, read;
+		i915_reg_t addr;
+		u32 mask, value, read;
 		bool ok;
 
 		addr = dev_priv->workarounds.reg[i].addr;

@@ -3119,7 +3278,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 		read = I915_READ(addr);
 		ok = (value & mask) == (read & mask);
 		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
-			   addr, value, mask, read, ok ? "OK" : "FAIL");
+			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
 	}
 
 	intel_runtime_pm_put(dev_priv);

@@ -5023,7 +5182,7 @@ static void gen9_sseu_device_status(struct drm_device *dev,
 
 		stat->slice_total++;
 
-		if (IS_SKYLAKE(dev))
+		if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
 			ss_cnt = INTEL_INFO(dev)->subslice_per_slice;
 
 		for (ss = 0; ss < ss_max; ss++) {

@@ -5236,6 +5395,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_energy_uJ", i915_energy_uJ, 0},
 	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
 	{"i915_power_domain_info", i915_power_domain_info, 0},
+	{"i915_dmc_info", i915_dmc_info, 0},
 	{"i915_display_info", i915_display_info, 0},
 	{"i915_semaphore_status", i915_semaphore_status, 0},
 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
@@ -28,7 +28,6 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/async.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>

@@ -338,7 +337,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
 		i915_resume_switcheroo(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	} else {
-		pr_err("switched off\n");
+		pr_info("switched off\n");
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		i915_suspend_switcheroo(dev, pmm);
 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;

@@ -396,7 +395,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto cleanup_vga_switcheroo;
 
-	intel_power_domains_init_hw(dev_priv);
+	intel_power_domains_init_hw(dev_priv, false);
+
+	intel_csr_ucode_init(dev_priv);
 
 	ret = intel_irq_install(dev_priv);
 	if (ret)

@@ -437,7 +438,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	 * scanning against hotplug events. Hence do this first and ignore the
 	 * tiny window where we will loose hotplug notifactions.
 	 */
-	async_schedule(intel_fbdev_initial_config, dev_priv);
+	intel_fbdev_initial_config_async(dev);
 
 	drm_kms_helper_poll_init(dev);
 

@@ -663,7 +664,8 @@ static void gen9_sseu_info_init(struct drm_device *dev)
 	 * supports EU power gating on devices with more than one EU
 	 * pair per subslice.
	 */
-	info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1));
+	info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
+			      (info->slice_total > 1));
 	info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
 	info->has_eu_pg = (info->eu_per_subslice > 2);
 }

@@ -890,7 +892,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	spin_lock_init(&dev_priv->mmio_flip_lock);
 	mutex_init(&dev_priv->sb_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
-	mutex_init(&dev_priv->csr_lock);
 	mutex_init(&dev_priv->av_mutex);
 
 	intel_pm_setup(dev);

@@ -937,9 +938,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	intel_uncore_init(dev);
 
-	/* Load CSR Firmware for SKL */
-	intel_csr_ucode_init(dev);
-
 	ret = i915_gem_gtt_init(dev);
 	if (ret)
 		goto out_freecsr;

@@ -1113,7 +1111,7 @@ out_mtrrfree:
 out_gtt:
 	i915_global_gtt_cleanup(dev);
 out_freecsr:
-	intel_csr_ucode_fini(dev);
+	intel_csr_ucode_fini(dev_priv);
 	intel_uncore_fini(dev);
 	pci_iounmap(dev->pdev, dev_priv->regs);
 put_bridge:

@@ -1131,6 +1129,8 @@ int i915_driver_unload(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	intel_fbdev_fini(dev);
+
 	i915_audio_component_cleanup(dev_priv);
 
 	ret = i915_gem_suspend(dev);

@@ -1153,8 +1153,6 @@ int i915_driver_unload(struct drm_device *dev)
 
 	acpi_video_unregister();
 
-	intel_fbdev_fini(dev);
-
 	drm_vblank_cleanup(dev);
 
 	intel_modeset_cleanup(dev);

@@ -1196,7 +1194,7 @@ int i915_driver_unload(struct drm_device *dev)
 	intel_fbc_cleanup_cfb(dev_priv);
 	i915_gem_cleanup_stolen(dev);
 
-	intel_csr_ucode_fini(dev);
+	intel_csr_ucode_fini(dev_priv);
 
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);

@@ -1264,8 +1262,6 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
-	if (file_priv && file_priv->bsd_ring)
-		file_priv->bsd_ring = NULL;
 	kfree(file_priv);
 }
 
@@ -383,6 +383,7 @@ static const struct intel_device_info intel_skylake_gt3_info = {
 
 static const struct intel_device_info intel_broxton_info = {
 	.is_preliminary = 1,
+	.is_broxton = 1,
 	.gen = 9,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,

@@ -394,50 +395,81 @@ static const struct intel_device_info intel_broxton_info = {
 	IVB_CURSOR_OFFSETS,
 };
 
+static const struct intel_device_info intel_kabylake_info = {
+	.is_preliminary = 1,
+	.is_kabylake = 1,
+	.gen = 9,
+	.num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	.has_llc = 1,
+	.has_ddi = 1,
+	.has_fpga_dbg = 1,
+	.has_fbc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_kabylake_gt3_info = {
+	.is_preliminary = 1,
+	.is_kabylake = 1,
+	.gen = 9,
+	.num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+	.has_llc = 1,
+	.has_ddi = 1,
+	.has_fpga_dbg = 1,
+	.has_fbc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
+
 /*
  * Make sure any device matches here are from most specific to most
  * general. For example, since the Quanta match is based on the subsystem
  * and subvendor IDs, we need it to come before the more general IVB
  * PCI ID matches, otherwise we'll use the wrong info struct above.
  */
-#define INTEL_PCI_IDS \
-	INTEL_I830_IDS(&intel_i830_info), \
-	INTEL_I845G_IDS(&intel_845g_info), \
-	INTEL_I85X_IDS(&intel_i85x_info), \
-	INTEL_I865G_IDS(&intel_i865g_info), \
-	INTEL_I915G_IDS(&intel_i915g_info), \
-	INTEL_I915GM_IDS(&intel_i915gm_info), \
-	INTEL_I945G_IDS(&intel_i945g_info), \
-	INTEL_I945GM_IDS(&intel_i945gm_info), \
-	INTEL_I965G_IDS(&intel_i965g_info), \
-	INTEL_G33_IDS(&intel_g33_info), \
-	INTEL_I965GM_IDS(&intel_i965gm_info), \
-	INTEL_GM45_IDS(&intel_gm45_info), \
-	INTEL_G45_IDS(&intel_g45_info), \
-	INTEL_PINEVIEW_IDS(&intel_pineview_info), \
-	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
-	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
-	INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
-	INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
-	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
-	INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
-	INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
-	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
-	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
-	INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
-	INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
-	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \
-	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
-	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
-	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
-	INTEL_CHV_IDS(&intel_cherryview_info), \
-	INTEL_SKL_GT1_IDS(&intel_skylake_info), \
-	INTEL_SKL_GT2_IDS(&intel_skylake_info), \
-	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), \
-	INTEL_BXT_IDS(&intel_broxton_info)
-
-static const struct pci_device_id pciidlist[] = { /* aka */
-	INTEL_PCI_IDS,
+static const struct pci_device_id pciidlist[] = {
+	INTEL_I830_IDS(&intel_i830_info),
+	INTEL_I845G_IDS(&intel_845g_info),
+	INTEL_I85X_IDS(&intel_i85x_info),
+	INTEL_I865G_IDS(&intel_i865g_info),
+	INTEL_I915G_IDS(&intel_i915g_info),
+	INTEL_I915GM_IDS(&intel_i915gm_info),
+	INTEL_I945G_IDS(&intel_i945g_info),
+	INTEL_I945GM_IDS(&intel_i945gm_info),
+	INTEL_I965G_IDS(&intel_i965g_info),
+	INTEL_G33_IDS(&intel_g33_info),
+	INTEL_I965GM_IDS(&intel_i965gm_info),
+	INTEL_GM45_IDS(&intel_gm45_info),
+	INTEL_G45_IDS(&intel_g45_info),
+	INTEL_PINEVIEW_IDS(&intel_pineview_info),
+	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
+	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
+	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
+	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
+	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
+	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
+	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
+	INTEL_HSW_D_IDS(&intel_haswell_d_info),
+	INTEL_HSW_M_IDS(&intel_haswell_m_info),
+	INTEL_VLV_M_IDS(&intel_valleyview_m_info),
+	INTEL_VLV_D_IDS(&intel_valleyview_d_info),
+	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
+	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
+	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
+	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
+	INTEL_CHV_IDS(&intel_cherryview_info),
+	INTEL_SKL_GT1_IDS(&intel_skylake_info),
+	INTEL_SKL_GT2_IDS(&intel_skylake_info),
+	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
+	INTEL_BXT_IDS(&intel_broxton_info),
+	INTEL_KBL_GT1_IDS(&intel_kabylake_info),
+	INTEL_KBL_GT2_IDS(&intel_kabylake_info),
+	INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
+	INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
+	{0, 0, 0}
 };
 

@@ -463,7 +495,7 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		ret = PCH_LPT;
 		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
-	} else if (IS_SKYLAKE(dev)) {
+	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
 		ret = PCH_SPT;
 		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
 	}

@@ -526,11 +558,13 @@ void intel_detect_pch(struct drm_device *dev)
 			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_SPT;
 				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
-				WARN_ON(!IS_SKYLAKE(dev));
+				WARN_ON(!IS_SKYLAKE(dev) &&
+					!IS_KABYLAKE(dev));
 			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_SPT;
 				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
-				WARN_ON(!IS_SKYLAKE(dev));
+				WARN_ON(!IS_SKYLAKE(dev) &&
+					!IS_KABYLAKE(dev));
 			} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = intel_virt_detect_pch(dev);
 			} else

@@ -570,26 +604,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 	return true;
 }
 
-void i915_firmware_load_error_print(const char *fw_path, int err)
-{
-	DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
-
-	/*
-	 * If the reason is not known assume -ENOENT since that's the most
-	 * usual failure mode.
-	 */
-	if (!err)
-		err = -ENOENT;
-
-	if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
-		return;
-
-	DRM_ERROR(
-	      "The driver is built-in, so to load the firmware you need to\n"
-	      "include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
-	      "in your initrd/initramfs image.\n");
-}
-
 static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;

@@ -608,7 +622,6 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
 static int intel_suspend_complete(struct drm_i915_private *dev_priv);
 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
 			      bool rpm_resume);
-static int skl_resume_prepare(struct drm_i915_private *dev_priv);
 static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
 
 

@@ -679,6 +692,9 @@ static int i915_drm_suspend(struct drm_device *dev)
 
 	intel_display_set_init_power(dev_priv, false);
 
+	if (HAS_CSR(dev_priv))
+		flush_work(&dev_priv->csr.work);
+
 	return 0;
 }
 

@@ -687,10 +703,13 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
 	int ret;
 
+	intel_power_domains_suspend(dev_priv);
+
 	ret = intel_suspend_complete(dev_priv);
 
 	if (ret) {
 		DRM_ERROR("Suspend complete failed: %d\n", ret);
+		intel_power_domains_init_hw(dev_priv, true);
 
 		return ret;
 	}

@@ -838,13 +857,11 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
 	if (IS_BROXTON(dev))
 		ret = bxt_resume_prepare(dev_priv);
-	else if (IS_SKYLAKE(dev_priv))
-		ret = skl_resume_prepare(dev_priv);
 	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		hsw_disable_pc8(dev_priv);
 
 	intel_uncore_sanitize(dev);
-	intel_power_domains_init_hw(dev_priv);
+	intel_power_domains_init_hw(dev_priv, true);
 
 	return ret;
 }

@@ -1051,15 +1068,6 @@ static int i915_pm_resume(struct device *dev)
 	return i915_drm_resume(drm_dev);
 }
 
-static int skl_suspend_complete(struct drm_i915_private *dev_priv)
-{
-	/* Enabling DC6 is not a hard requirement to enter runtime D3 */
-
-	skl_uninit_cdclk(dev_priv);
-
-	return 0;
-}
-
 static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
 {
 	hsw_enable_pc8(dev_priv);

@@ -1099,16 +1107,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
 	return 0;
 }
 
-static int skl_resume_prepare(struct drm_i915_private *dev_priv)
-{
-	struct drm_device *dev = dev_priv->dev;
-
-	skl_init_cdclk(dev_priv);
-	intel_csr_load_program(dev);
-
-	return 0;
-}
-
 /*
  * Save all Gunit registers that may be lost after a D3 and a subsequent
  * S0i[R123] transition. The list of registers needing a save/restore is

@@ -1572,8 +1570,6 @@ static int intel_runtime_resume(struct device *device)
 
 	if (IS_BROXTON(dev))
 		ret = bxt_resume_prepare(dev_priv);
-	else if (IS_SKYLAKE(dev))
-		ret = skl_resume_prepare(dev_priv);
 	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		hsw_disable_pc8(dev_priv);
 	else if (IS_VALLEYVIEW(dev_priv))

@@ -1616,8 +1612,6 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv)
 
 	if (IS_BROXTON(dev_priv))
 		ret = bxt_suspend_complete(dev_priv);
-	else if (IS_SKYLAKE(dev_priv))
-		ret = skl_suspend_complete(dev_priv);
 	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		ret = hsw_suspend_complete(dev_priv);
 	else if (IS_VALLEYVIEW(dev_priv))
@@ -57,7 +57,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20151010"
+#define DRIVER_DATE		"20151120"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */

@@ -180,15 +180,11 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_TRANSCODER_B,
 	POWER_DOMAIN_TRANSCODER_C,
 	POWER_DOMAIN_TRANSCODER_EDP,
-	POWER_DOMAIN_PORT_DDI_A_2_LANES,
-	POWER_DOMAIN_PORT_DDI_A_4_LANES,
-	POWER_DOMAIN_PORT_DDI_B_2_LANES,
-	POWER_DOMAIN_PORT_DDI_B_4_LANES,
-	POWER_DOMAIN_PORT_DDI_C_2_LANES,
-	POWER_DOMAIN_PORT_DDI_C_4_LANES,
-	POWER_DOMAIN_PORT_DDI_D_2_LANES,
-	POWER_DOMAIN_PORT_DDI_D_4_LANES,
-	POWER_DOMAIN_PORT_DDI_E_2_LANES,
+	POWER_DOMAIN_PORT_DDI_A_LANES,
+	POWER_DOMAIN_PORT_DDI_B_LANES,
+	POWER_DOMAIN_PORT_DDI_C_LANES,
+	POWER_DOMAIN_PORT_DDI_D_LANES,
+	POWER_DOMAIN_PORT_DDI_E_LANES,
 	POWER_DOMAIN_PORT_DSI,
 	POWER_DOMAIN_PORT_CRT,
 	POWER_DOMAIN_PORT_OTHER,

@@ -199,6 +195,8 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_AUX_B,
 	POWER_DOMAIN_AUX_C,
 	POWER_DOMAIN_AUX_D,
+	POWER_DOMAIN_GMBUS,
+	POWER_DOMAIN_MODESET,
 	POWER_DOMAIN_INIT,
 
 	POWER_DOMAIN_NUM,

@@ -630,11 +628,9 @@ struct drm_i915_display_funcs {
 			  int target, int refclk,
 			  struct dpll *match_clock,
 			  struct dpll *best_clock);
+	int (*compute_pipe_wm)(struct intel_crtc *crtc,
+			       struct drm_atomic_state *state);
 	void (*update_wm)(struct drm_crtc *crtc);
-	void (*update_sprite_wm)(struct drm_plane *plane,
-				 struct drm_crtc *crtc,
-				 uint32_t sprite_width, uint32_t sprite_height,
-				 int pixel_size, bool enable, bool scaled);
 	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
 	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
 	/* Returns the active state of the crtc, and if the crtc is active,

@@ -692,18 +688,18 @@ struct intel_uncore_funcs {
 	void (*force_wake_put)(struct drm_i915_private *dev_priv,
 			       enum forcewake_domains domains);
 
-	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
-	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
-	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
-	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
+	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
+	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
+	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
 
-	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
+	void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r,
 			    uint8_t val, bool trace);
-	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
+	void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r,
 			    uint16_t val, bool trace);
-	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
+	void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
 			    uint32_t val, bool trace);
-	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
+	void (*mmio_writeq)(struct drm_i915_private *dev_priv, i915_reg_t r,
 			    uint64_t val, bool trace);
 };

@@ -720,11 +716,11 @@ struct intel_uncore {
 		enum forcewake_domain_id id;
 		unsigned wake_count;
 		struct timer_list timer;
-		u32 reg_set;
+		i915_reg_t reg_set;
 		u32 val_set;
 		u32 val_clear;
-		u32 reg_ack;
-		u32 reg_post;
+		i915_reg_t reg_ack;
+		i915_reg_t reg_post;
 		u32 val_reset;
 	} fw_domain[FW_DOMAIN_ID_COUNT];
 };

@@ -739,20 +735,19 @@ struct intel_uncore {
 #define for_each_fw_domain(domain__, dev_priv__, i__) \
 	for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
 
-enum csr_state {
-	FW_UNINITIALIZED = 0,
-	FW_LOADED,
-	FW_FAILED
-};
+#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
+#define CSR_VERSION_MAJOR(version)	((version) >> 16)
+#define CSR_VERSION_MINOR(version)	((version) & 0xffff)
 
 struct intel_csr {
 	struct work_struct work;
 	const char *fw_path;
 	uint32_t *dmc_payload;
 	uint32_t dmc_fw_size;
+	uint32_t version;
 	uint32_t mmio_count;
-	uint32_t mmioaddr[8];
+	i915_reg_t mmioaddr[8];
 	uint32_t mmiodata[8];
-	enum csr_state state;
 };

@@ -770,6 +765,8 @@ struct intel_csr {
 	func(is_valleyview) sep \
 	func(is_haswell) sep \
 	func(is_skylake) sep \
+	func(is_broxton) sep \
+	func(is_kabylake) sep \
 	func(is_preliminary) sep \
 	func(has_fbc) sep \
 	func(has_pipe_cxsr) sep \

@@ -928,24 +925,7 @@ struct i915_fbc {
 		struct drm_framebuffer *fb;
 	} *fbc_work;
 
-	enum no_fbc_reason {
-		FBC_OK, /* FBC is enabled */
-		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
-		FBC_NO_OUTPUT, /* no outputs enabled to compress */
-		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
-		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
-		FBC_MODE_TOO_LARGE, /* mode too large for compression */
-		FBC_BAD_PLANE, /* fbc not supported on plane */
-		FBC_NOT_TILED, /* buffer not tiled */
-		FBC_MULTIPLE_PIPES, /* more than one pipe active */
-		FBC_MODULE_PARAM,
-		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
-		FBC_ROTATION, /* rotation is not supported */
-		FBC_IN_DBG_MASTER, /* kernel debugger is active */
-		FBC_BAD_STRIDE, /* stride is not supported */
-		FBC_PIXEL_RATE, /* pixel rate is too big */
-		FBC_PIXEL_FORMAT /* pixel format is invalid */
-	} no_fbc_reason;
+	const char *no_fbc_reason;
 
 	bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
 	void (*enable_fbc)(struct intel_crtc *crtc);

@@ -1019,7 +999,7 @@ struct intel_gmbus {
 	struct i2c_adapter adapter;
 	u32 force_bit;
 	u32 reg0;
-	u32 gpio_reg;
+	i915_reg_t gpio_reg;
 	struct i2c_algo_bit_data bit_algo;
 	struct drm_i915_private *dev_priv;
 };

@@ -1668,7 +1648,7 @@ struct i915_frontbuffer_tracking {
 };
 
 struct i915_wa_reg {
-	u32 addr;
+	i915_reg_t addr;
 	u32 value;
 	/* bitmask representing WA bits */
 	u32 mask;

@@ -1697,6 +1677,13 @@ struct i915_execbuffer_params {
 	struct drm_i915_gem_request     *request;
 };
 
+/* used in computing the new watermarks state */
+struct intel_wm_config {
+	unsigned int num_pipes_active;
+	bool sprites_enabled;
+	bool sprites_scaled;
+};
+
 struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *objects;

@@ -1717,9 +1704,6 @@ struct drm_i915_private {
 
 	struct intel_csr csr;
 
-	/* Display CSR-related protection */
-	struct mutex csr_lock;
-
 	struct intel_gmbus gmbus[GMBUS_NUM_PINS];
 
 	/** gmbus_mutex protects against concurrent usage of the single hw gmbus

@@ -1734,6 +1718,8 @@ struct drm_i915_private {
 	/* MMIO base address for MIPI regs */
 	uint32_t mipi_mmio_base;
 
+	uint32_t psr_mmio_base;
+
 	wait_queue_head_t gmbus_wait_queue;
 
 	struct pci_dev *bridge_dev;

@@ -1921,6 +1907,9 @@ struct drm_i915_private {
 	 */
 	uint16_t skl_latency[8];
 
+	/* Committed wm config */
+	struct intel_wm_config config;
+
 	/*
 	 * The skl_wm_values structure is a bit too big for stack
 	 * allocation, so we keep the staging struct where we store

@@ -2435,6 +2424,15 @@ struct drm_i915_cmd_table {
 #define INTEL_DEVID(p)	(INTEL_INFO(p)->device_id)
 #define INTEL_REVID(p)	(__I915__(p)->dev->pdev->revision)
 
+#define REVID_FOREVER		0xff
+/*
+ * Return true if revision is in range [since,until] inclusive.
+ *
+ * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
+ */
+#define IS_REVID(p, since, until) \
+	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
+
 #define IS_I830(dev)		(INTEL_DEVID(dev) == 0x3577)
 #define IS_845G(dev)		(INTEL_DEVID(dev) == 0x2562)
 #define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)

@@ -2461,7 +2459,8 @@ struct drm_i915_cmd_table {
 #define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
 #define IS_BROADWELL(dev)	(!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
 #define IS_SKYLAKE(dev)	(INTEL_INFO(dev)->is_skylake)
-#define IS_BROXTON(dev)	(!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev))
+#define IS_BROXTON(dev)	(INTEL_INFO(dev)->is_broxton)
+#define IS_KABYLAKE(dev)	(INTEL_INFO(dev)->is_kabylake)
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)

@@ -2496,16 +2495,21 @@ struct drm_i915_cmd_table {
 
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
-#define SKL_REVID_A0		(0x0)
-#define SKL_REVID_B0		(0x1)
-#define SKL_REVID_C0		(0x2)
-#define SKL_REVID_D0		(0x3)
-#define SKL_REVID_E0		(0x4)
-#define SKL_REVID_F0		(0x5)
+#define SKL_REVID_A0		0x0
+#define SKL_REVID_B0		0x1
+#define SKL_REVID_C0		0x2
+#define SKL_REVID_D0		0x3
+#define SKL_REVID_E0		0x4
+#define SKL_REVID_F0		0x5
 
-#define BXT_REVID_A0		(0x0)
-#define BXT_REVID_B0		(0x3)
-#define BXT_REVID_C0		(0x9)
+#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
+
+#define BXT_REVID_A0		0x0
+#define BXT_REVID_A1		0x1
+#define BXT_REVID_B0		0x3
+#define BXT_REVID_C0		0x9
+
+#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
 
 /*
  * The genX designation typically refers to the render engine, so render

@@ -2577,10 +2581,10 @@ struct drm_i915_cmd_table {
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
 #define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev) || \
 				 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
-				 IS_SKYLAKE(dev))
+				 IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
 #define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
 				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
-				 IS_SKYLAKE(dev))
+				 IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
 #define HAS_RC6(dev)		(INTEL_INFO(dev)->gen >= 6)
 #define HAS_RC6p(dev)		(INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
 

@@ -2640,6 +2644,7 @@ struct i915_params {
 	int panel_use_ssc;
 	int vbt_sdvo_panel_type;
 	int enable_rc6;
+	int enable_dc;
 	int enable_fbc;
 	int enable_ppgtt;
 	int enable_execlists;

@@ -2688,7 +2693,6 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-void i915_firmware_load_error_print(const char *fw_path, int err);
 
 /* intel_hotplug.c */
 void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);

@@ -2995,8 +2999,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
 int __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
-				     struct intel_engine_cs *pipelined,
-				     struct drm_i915_gem_request **pipelined_request,
 				     const struct i915_ggtt_view *view);
 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
 					      const struct i915_ggtt_view *view);

@@ -3351,7 +3353,6 @@ extern void intel_set_rps(struct drm_device *dev, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
 				  bool enable);
 extern void intel_detect_pch(struct drm_device *dev);
-extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 extern int intel_enable_rc6(const struct drm_device *dev);
 
 extern bool i915_semaphore_is_enabled(struct drm_device *dev);

@@ -3434,6 +3435,32 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
 
+#define __raw_read(x, s) \
+static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
+					     i915_reg_t reg) \
+{ \
+	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
+}
+
+#define __raw_write(x, s) \
+static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
+				       i915_reg_t reg, uint##x##_t val) \
+{ \
+	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
+}
+__raw_read(8, b)
+__raw_read(16, w)
+__raw_read(32, l)
+__raw_read(64, q)
+
+__raw_write(8, b)
+__raw_write(16, w)
+__raw_write(32, l)
+__raw_write(64, q)
+
+#undef __raw_read
+#undef __raw_write
+
 /* These are untraced mmio-accessors that are only valid to be used inside
  * criticial sections inside IRQ handlers where forcewake is explicitly
  * controlled.

@@ -3441,8 +3468,8 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
  * Note: Should only be used between intel_uncore_forcewake_irqlock() and
  * intel_uncore_forcewake_irqunlock().
  */
-#define I915_READ_FW(reg__) readl(dev_priv->regs + (reg__))
-#define I915_WRITE_FW(reg__, val__) writel(val__, dev_priv->regs + (reg__))
+#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
+#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
 
 /* "Broadcast RGB" property */

@@ -3450,7 +3477,7 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 #define INTEL_BROADCAST_RGB_FULL 1
 #define INTEL_BROADCAST_RGB_LIMITED 2
 
-static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
+static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
 {
 	if (IS_VALLEYVIEW(dev))
 		return VLV_VGACNTRL;
@ -2737,6 +2737,8 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
|
||||
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
|
||||
struct intel_engine_cs *ring)
|
||||
{
|
||||
struct intel_ringbuffer *buffer;
|
||||
|
||||
while (!list_empty(&ring->active_list)) {
|
||||
struct drm_i915_gem_object *obj;
|
||||
|
||||
@ -2752,18 +2754,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
|
||||
* are the ones that keep the context and ringbuffer backing objects
|
||||
* pinned in place.
|
||||
*/
|
||||
while (!list_empty(&ring->execlist_queue)) {
|
||||
struct drm_i915_gem_request *submit_req;
|
||||
|
||||
submit_req = list_first_entry(&ring->execlist_queue,
|
||||
struct drm_i915_gem_request,
|
||||
execlist_link);
|
||||
list_del(&submit_req->execlist_link);
|
||||
if (i915.enable_execlists) {
|
||||
spin_lock_irq(&ring->execlist_lock);
|
||||
while (!list_empty(&ring->execlist_queue)) {
|
||||
struct drm_i915_gem_request *submit_req;
|
||||
|
||||
if (submit_req->ctx != ring->default_context)
|
||||
intel_lr_context_unpin(submit_req);
|
||||
submit_req = list_first_entry(&ring->execlist_queue,
|
||||
struct drm_i915_gem_request,
|
||||
execlist_link);
|
||||
list_del(&submit_req->execlist_link);
|
||||
|
||||
i915_gem_request_unreference(submit_req);
|
||||
if (submit_req->ctx != ring->default_context)
|
||||
intel_lr_context_unpin(submit_req);
|
||||
|
||||
i915_gem_request_unreference(submit_req);
|
||||
}
|
||||
spin_unlock_irq(&ring->execlist_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2782,6 +2789,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
|
||||
|
||||
i915_gem_request_retire(request);
|
||||
}
|
||||
|
||||
/* Having flushed all requests from all queues, we know that all
|
||||
* ringbuffers must now be empty. However, since we do not reclaim
|
||||
* all space when retiring the request (to prevent HEADs colliding
|
||||
* with rapid ringbuffer wraparound) the amount of available space
|
||||
* upon reset is less than when we start. Do one more pass over
|
||||
* all the ringbuffers to reset last_retired_head.
|
||||
*/
|
||||
list_for_each_entry(buffer, &ring->buffers, link) {
|
||||
buffer->last_retired_head = buffer->tail;
|
||||
intel_ring_update_space(buffer);
|
||||
}
|
||||
}
|
||||
|
||||
void i915_gem_reset(struct drm_device *dev)
@@ -3826,7 +3845,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
	 * cacheline, whereas normally such cachelines would get
	 * invalidated.
	 */
-	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		return -ENODEV;
 
	level = I915_CACHE_LLC;
@@ -3869,17 +3888,11 @@ rpm_put:
 int
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
-				     struct intel_engine_cs *pipelined,
-				     struct drm_i915_gem_request **pipelined_request,
				     const struct i915_ggtt_view *view)
 {
	u32 old_read_domains, old_write_domain;
	int ret;
 
-	ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
-	if (ret)
-		return ret;
-
	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
@@ -4476,10 +4489,8 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 {
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (i915_is_ggtt(vma->vm) &&
-		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-			continue;
-		if (vma->vm == vm)
+		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
+		    vma->vm == vm)
			return vma;
	}
	return NULL;
@@ -4568,7 +4579,6 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
	struct intel_engine_cs *ring = req->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
	int i, ret;
 
@@ -4584,10 +4594,10 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
-	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
+	for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ring, reg_base + i);
-		intel_ring_emit(ring, remap_info[i/4]);
+		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
+		intel_ring_emit(ring, remap_info[i]);
	}
 
	intel_ring_advance(ring);
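The loop rewrite above swaps a byte offset for a table index. A small stand-alone check, with a placeholder base offset rather than the real GEN7_L3LOG_BASE, shows that both forms emit the same (offset, value) pairs:

#include <stdio.h>

#define L3LOG_BASE	0x1000		/* placeholder base, for illustration only */
#define L3LOG_SIZE	0x80		/* bytes: 0x20 dword-sized entries */

int main(void)
{
	unsigned int i;

	for (i = 0; i < L3LOG_SIZE; i += 4)	/* old: step a byte offset */
		printf("0x%x <- entry %u\n", L3LOG_BASE + i, i / 4);

	for (i = 0; i < L3LOG_SIZE / 4; i++)	/* new: step an entry index */
		printf("0x%x <- entry %u\n", L3LOG_BASE + 4 * i, i);

	return 0;
}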
@@ -4755,18 +4765,9 @@ i915_gem_init_hw(struct drm_device *dev)
	if (HAS_GUC_UCODE(dev)) {
		ret = intel_guc_ucode_load(dev);
		if (ret) {
-			/*
-			 * If we got an error and GuC submission is enabled, map
-			 * the error to -EIO so the GPU will be declared wedged.
-			 * OTOH, if we didn't intend to use the GuC anyway, just
-			 * discard the error and carry on.
-			 */
-			DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret,
-				  i915.enable_guc_submission ? "" :
-				  " (ignored)");
-			ret = i915.enable_guc_submission ? -EIO : 0;
-			if (ret)
-				goto out;
+			DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
+			ret = -EIO;
+			goto out;
		}
	}
 
@@ -556,7 +556,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
		if (signaller == ring)
			continue;
 
-		intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+		intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
		intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
	}
 }
@@ -581,7 +581,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
		if (signaller == ring)
			continue;
 
-		intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+		intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
		intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
	}
 }
@@ -925,6 +925,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
+	case I915_CONTEXT_PARAM_GTT_SIZE:
+		if (ctx->ppgtt)
+			args->value = ctx->ppgtt->base.total;
+		else if (to_i915(dev)->mm.aliasing_ppgtt)
+			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
+		else
+			args->value = to_i915(dev)->gtt.base.total;
+		break;
	default:
		ret = -EINVAL;
		break;
 
@@ -1114,7 +1114,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
+		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}
 
@@ -1241,7 +1241,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ring, INSTPM);
+		intel_ring_emit_reg(ring, INSTPM);
		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
		intel_ring_advance(ring);
 
@@ -59,7 +59,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
-	int fence_reg_lo, fence_reg_hi;
+	i915_reg_t fence_reg_lo, fence_reg_hi;
	int fence_pitch_shift;
 
	if (INTEL_INFO(dev)->gen >= 6) {
@@ -24,6 +24,7 @@
  */
 
 #include <linux/seq_file.h>
+#include <linux/stop_machine.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -104,9 +105,11 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 {
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;
+	bool has_full_48bit_ppgtt;
 
	has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
	has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
+	has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
 
	if (intel_vgpu_active(dev))
		has_full_ppgtt = false; /* emulation is too hard */
@@ -125,6 +128,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;
 
+	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
+		return 3;
+
 #ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
@@ -141,7 +147,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
	}
 
	if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
-		return 2;
+		return has_full_48bit_ppgtt ? 3 : 2;
	else
		return has_aliasing_ppgtt ? 1 : 0;
 }
@@ -661,10 +667,10 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
		return ret;
 
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
+	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
	intel_ring_emit(ring, upper_32_bits(addr));
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
+	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
	intel_ring_emit(ring, lower_32_bits(addr));
	intel_ring_advance(ring);
 
@@ -904,14 +910,13 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
	enum vgt_g2v_type msg;
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned int offset = vgtif_reg(pdp0_lo);
	int i;
 
	if (USES_FULL_48BIT_PPGTT(dev)) {
		u64 daddr = px_dma(&ppgtt->pml4);
 
-		I915_WRITE(offset, lower_32_bits(daddr));
-		I915_WRITE(offset + 4, upper_32_bits(daddr));
+		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
+		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
 
		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
@@ -919,10 +924,8 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
		for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
			u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-			I915_WRITE(offset, lower_32_bits(daddr));
-			I915_WRITE(offset + 4, upper_32_bits(daddr));
-
-			offset += 8;
+			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
+			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
		}
 
		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
@@ -1662,9 +1665,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
		return ret;
 
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
@@ -1699,9 +1702,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
		return ret;
 
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
@@ -2528,6 +2531,26 @@ static int ggtt_bind_vma(struct i915_vma *vma,
	return 0;
 }
 
+struct ggtt_bind_vma__cb {
+	struct i915_vma *vma;
+	enum i915_cache_level cache_level;
+	u32 flags;
+};
+
+static int ggtt_bind_vma__cb(void *_arg)
+{
+	struct ggtt_bind_vma__cb *arg = _arg;
+	return ggtt_bind_vma(arg->vma, arg->cache_level, arg->flags);
+}
+
+static int ggtt_bind_vma__BKL(struct i915_vma *vma,
+			      enum i915_cache_level cache_level,
+			      u32 flags)
+{
+	struct ggtt_bind_vma__cb arg = { vma, cache_level, flags };
+	return stop_machine(ggtt_bind_vma__cb, &arg, NULL);
+}
+
 static int aliasing_gtt_bind_vma(struct i915_vma *vma,
				 enum i915_cache_level cache_level,
				 u32 flags)
@@ -2995,6 +3018,9 @@ static int gen8_gmch_probe(struct drm_device *dev,
	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
 
+	if (IS_CHERRYVIEW(dev))
+		dev_priv->gtt.base.bind_vma = ggtt_bind_vma__BKL;
+
	return ret;
 }
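For reference, stop_machine(fn, data, cpus) runs fn on one CPU while every other CPU spins with interrupts disabled, which is what lets the Cherryview path above rewrite GTT PTEs with nothing racing against it. A minimal sketch of the pattern, with an invented callback rather than driver code:

#include <linux/stop_machine.h>

struct update_args {
	u64 *slot;
	u64 value;
};

/* Runs on one CPU while all others are held in a spin loop. */
static int do_update(void *data)
{
	struct update_args *args = data;

	*args->slot = args->value;	/* no other CPU can observe a torn write */
	return 0;
}

static int update_serialised(u64 *slot, u64 value)
{
	struct update_args args = { slot, value };

	return stop_machine(do_update, &args, NULL);	/* NULL: any one CPU runs it */
}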
@@ -3302,7 +3328,7 @@ static struct sg_table *
 intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
			  struct drm_i915_gem_object *obj)
 {
-	struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
+	struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info;
	unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
	unsigned int size_pages_uv;
	struct sg_page_iter sg_iter;
@@ -3534,7 +3560,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
	if (view->type == I915_GGTT_VIEW_NORMAL) {
		return obj->base.size;
	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
-		return view->rotation_info.size;
+		return view->params.rotation_info.size;
	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
		return view->params.partial.size << PAGE_SHIFT;
	} else {
 
@@ -156,13 +156,10 @@ struct i915_ggtt_view {
			u64 offset;
			unsigned int size;
		} partial;
+		struct intel_rotation_info rotation_info;
	} params;
 
	struct sg_table *pages;
-
-	union {
-		struct intel_rotation_info rotation_info;
-	};
 };
 
 extern const struct i915_ggtt_view i915_ggtt_view_normal;
@@ -556,7 +553,7 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
 
	if (a->type != b->type)
		return false;
-	if (a->type == I915_GGTT_VIEW_PARTIAL)
+	if (a->type != I915_GGTT_VIEW_NORMAL)
		return !memcmp(&a->params, &b->params, sizeof(a->params));
	return true;
 }
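The two header hunks above work together: once every view type keeps its parameters inside the params union, one byte-wise compare covers all non-normal views. A simplified stand-in, with invented types, assuming views are zero-initialized so padding bytes compare equal:

#include <stdbool.h>
#include <string.h>

enum view_type { VIEW_NORMAL, VIEW_ROTATED, VIEW_PARTIAL };

struct view {
	enum view_type type;
	union {
		struct { unsigned long offset, size; } partial;
		struct { unsigned int width, height; } rotated;
	} params;			/* all per-type identity lives here */
};

static bool view_equal(const struct view *a, const struct view *b)
{
	if (a->type != b->type)
		return false;
	if (a->type != VIEW_NORMAL)	/* one memcmp now covers every case */
		return !memcmp(&a->params, &b->params, sizeof(a->params));
	return true;
}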
@@ -433,7 +433,8 @@ int i915_gem_init_stolen(struct drm_device *dev)
					 &reserved_size);
		break;
	default:
-		if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+		if (IS_BROADWELL(dev_priv) ||
+		    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
			bdw_get_stolen_reserved(dev_priv, &reserved_base,
						&reserved_size);
		else
 
@@ -176,6 +176,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
		return -EINVAL;
	}
 
+	intel_runtime_pm_get(dev_priv);
+
	mutex_lock(&dev->struct_mutex);
	if (obj->pin_display || obj->framebuffer_references) {
		ret = -EBUSY;
@@ -269,6 +271,8 @@ err:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
 
+	intel_runtime_pm_put(dev_priv);
+
	return ret;
 }
 
@@ -366,6 +366,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
+
+	if (HAS_CSR(dev)) {
+		struct intel_csr *csr = &dev_priv->csr;
+
+		err_printf(m, "DMC loaded: %s\n",
+			   yesno(csr->dmc_payload != NULL));
+		err_printf(m, "DMC fw version: %d.%d\n",
+			   CSR_VERSION_MAJOR(csr->version),
+			   CSR_VERSION_MINOR(csr->version));
+	}
+
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_INFO(dev)->gen >= 8) {
@@ -862,7 +873,7 @@ static void i915_record_ring_state(struct drm_device *dev,
	struct drm_i915_private *dev_priv = dev->dev_private;
 
	if (INTEL_INFO(dev)->gen >= 6) {
-		ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
+		ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base));
		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
		if (INTEL_INFO(dev)->gen >= 8)
			gen8_record_semaphore_state(dev_priv, error, ring, ering);
@@ -899,7 +910,7 @@ static void i915_record_ring_state(struct drm_device *dev,
	ering->ctl = I915_READ_CTL(ring);
 
	if (I915_NEED_GFX_HWS(dev)) {
-		int mmio;
+		i915_reg_t mmio;
 
		if (IS_GEN7(dev)) {
			switch (ring->id) {
@@ -1071,6 +1082,25 @@ static void i915_gem_record_rings(struct drm_device *dev,
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;
 
+			if (count >= error->ring[i].num_requests) {
+				/*
+				 * If the ring request list was changed in
+				 * between the point where the error request
+				 * list was created and dimensioned and this
+				 * point then just exit early to avoid crashes.
+				 *
+				 * We don't need to communicate that the
+				 * request list changed state during error
+				 * state capture and that the error state is
+				 * slightly incorrect as a consequence since we
+				 * are typically only interested in the request
+				 * list state at the point of error state
+				 * capture, not in any changes happening during
+				 * the capture.
+				 */
+				break;
+			}
+
			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
@@ -1181,7 +1211,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
	if (IS_VALLEYVIEW(dev)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
-		error->forcewake = I915_READ(FORCEWAKE_VLV);
+		error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
	}
 
	if (IS_GEN7(dev))
@@ -1193,14 +1223,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
	}
 
	if (IS_GEN6(dev)) {
-		error->forcewake = I915_READ(FORCEWAKE);
+		error->forcewake = I915_READ_FW(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}
 
	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
-		error->forcewake = I915_READ(FORCEWAKE_MT);
+		error->forcewake = I915_READ_FW(FORCEWAKE_MT);
 
	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
 
@@ -26,7 +26,7 @@
 
 /* Definitions of GuC H/W registers, bits, etc */
 
-#define GUC_STATUS			0xc000
+#define GUC_STATUS			_MMIO(0xc000)
 #define   GS_BOOTROM_SHIFT		1
 #define   GS_BOOTROM_MASK		  (0x7F << GS_BOOTROM_SHIFT)
 #define   GS_BOOTROM_RSA_FAILED	  (0x50 << GS_BOOTROM_SHIFT)
@@ -39,40 +39,41 @@
 #define   GS_MIA_MASK			  (0x07 << GS_MIA_SHIFT)
 #define   GS_MIA_CORE_STATE		  (1 << GS_MIA_SHIFT)
 
-#define SOFT_SCRATCH(n)			(0xc180 + ((n) * 4))
+#define SOFT_SCRATCH(n)			_MMIO(0xc180 + (n) * 4)
 
-#define UOS_RSA_SCRATCH(i)		(0xc200 + (i) * 4)
-#define DMA_ADDR_0_LOW			0xc300
-#define DMA_ADDR_0_HIGH			0xc304
-#define DMA_ADDR_1_LOW			0xc308
-#define DMA_ADDR_1_HIGH			0xc30c
+#define UOS_RSA_SCRATCH(i)		_MMIO(0xc200 + (i) * 4)
+#define   UOS_RSA_SCRATCH_MAX_COUNT	  64
+#define DMA_ADDR_0_LOW			_MMIO(0xc300)
+#define DMA_ADDR_0_HIGH			_MMIO(0xc304)
+#define DMA_ADDR_1_LOW			_MMIO(0xc308)
+#define DMA_ADDR_1_HIGH			_MMIO(0xc30c)
 #define   DMA_ADDRESS_SPACE_WOPCM	  (7 << 16)
 #define   DMA_ADDRESS_SPACE_GTT	  (8 << 16)
-#define DMA_COPY_SIZE			0xc310
-#define DMA_CTRL			0xc314
+#define DMA_COPY_SIZE			_MMIO(0xc310)
+#define DMA_CTRL			_MMIO(0xc314)
 #define   UOS_MOVE			  (1<<4)
 #define   START_DMA			  (1<<0)
-#define DMA_GUC_WOPCM_OFFSET		0xc340
+#define DMA_GUC_WOPCM_OFFSET		_MMIO(0xc340)
 #define   GUC_WOPCM_OFFSET_VALUE	  0x80000	/* 512KB */
-#define GUC_MAX_IDLE_COUNT		0xC3E4
+#define GUC_MAX_IDLE_COUNT		_MMIO(0xC3E4)
 
-#define GUC_WOPCM_SIZE			0xc050
+#define GUC_WOPCM_SIZE			_MMIO(0xc050)
 #define   GUC_WOPCM_SIZE_VALUE		  (0x80 << 12)	/* 512KB */
 
 /* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
 #define GUC_WOPCM_TOP			(GUC_WOPCM_SIZE_VALUE)
 
-#define GEN8_GT_PM_CONFIG		0x138140
-#define GEN9LP_GT_PM_CONFIG		0x138140
-#define GEN9_GT_PM_CONFIG		0x13816c
+#define GEN8_GT_PM_CONFIG		_MMIO(0x138140)
+#define GEN9LP_GT_PM_CONFIG		_MMIO(0x138140)
+#define GEN9_GT_PM_CONFIG		_MMIO(0x13816c)
 #define   GT_DOORBELL_ENABLE		  (1<<0)
 
-#define GEN8_GTCR			0x4274
+#define GEN8_GTCR			_MMIO(0x4274)
 #define   GEN8_GTCR_INVALIDATE		  (1<<0)
 
-#define GUC_ARAT_C6DIS			0xA178
+#define GUC_ARAT_C6DIS			_MMIO(0xA178)
 
-#define GUC_SHIM_CONTROL		0xc064
+#define GUC_SHIM_CONTROL		_MMIO(0xc064)
 #define   GUC_DISABLE_SRAM_INIT_TO_ZEROES	(1<<0)
 #define   GUC_ENABLE_READ_CACHE_LOGIC		(1<<1)
 #define   GUC_ENABLE_MIA_CACHING		(1<<2)
@@ -89,21 +90,21 @@
				 GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA	| \
				 GUC_ENABLE_MIA_CLOCK_GATING)
 
-#define HOST2GUC_INTERRUPT		0xc4c8
+#define HOST2GUC_INTERRUPT		_MMIO(0xc4c8)
 #define   HOST2GUC_TRIGGER		  (1<<0)
 
 #define DRBMISC1			0x1984
 #define   DOORBELL_ENABLE		  (1<<0)
 
-#define GEN8_DRBREGL(x)			(0x1000 + (x) * 8)
+#define GEN8_DRBREGL(x)			_MMIO(0x1000 + (x) * 8)
 #define   GEN8_DRB_VALID		  (1<<0)
-#define GEN8_DRBREGU(x)			(GEN8_DRBREGL(x) + 4)
+#define GEN8_DRBREGU(x)			_MMIO(0x1000 + (x) * 8 + 4)
 
-#define DE_GUCRMR			0x44054
+#define DE_GUCRMR			_MMIO(0x44054)
 
-#define GUC_BCS_RCS_IER			0xC550
-#define GUC_VCS2_VCS1_IER		0xC554
-#define GUC_WD_VECS_IER			0xC558
-#define GUC_PM_P24C_IER			0xC55C
+#define GUC_BCS_RCS_IER			_MMIO(0xC550)
+#define GUC_VCS2_VCS1_IER		_MMIO(0xC554)
+#define GUC_WD_VECS_IER			_MMIO(0xC558)
+#define GUC_PM_P24C_IER			_MMIO(0xC55C)
 
 #endif
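All of these conversions rely on the same trick: _MMIO() wraps a raw offset in a one-member struct, so plain integers and register offsets stop being interchangeable at compile time. The shape below matches how the macro is used throughout this series, though the canonical definition lives in i915_reg.h:

#include <linux/types.h>

typedef struct {
	u32 reg;
} i915_reg_t;

#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

static inline u32 i915_mmio_reg_offset(i915_reg_t reg)
{
	return reg.reg;
}

/* I915_READ(0xc000) no longer compiles; I915_READ(GUC_STATUS) does. */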
@@ -27,7 +27,7 @@
 #include "intel_guc.h"
 
 /**
- * DOC: GuC Client
+ * DOC: GuC-based command submission
  *
  * i915_guc_client:
 * We use the term client to avoid confusion with contexts. A i915_guc_client is
@@ -161,9 +161,9 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
	data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,bxt */
	if (!intel_enable_rc6(dev_priv->dev) ||
-	    (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
-	    (IS_SKL_GT3(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)) ||
-	    (IS_SKL_GT4(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
+	    (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
+	    (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
		data[1] = 0;
	else
		/* bit 0 and 1 are for Render and Media domain separately */
@@ -258,7 +258,7 @@ static void guc_disable_doorbell(struct intel_guc *guc,
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct guc_doorbell_info *doorbell;
	void *base;
-	int drbreg = GEN8_DRBREGL(client->doorbell_id);
+	i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
	int value;
 
	base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
@@ -588,8 +588,7 @@ static void lr_context_update(struct drm_i915_gem_request *rq)
 /**
  * i915_guc_submit() - Submit commands through GuC
  * @client:	the guc client where commands will go through
- * @ctx:	LRC where commands come from
- * @ring:	HW engine that will excute the commands
  * @rq:	request associated with the commands
  *
  * Return:	0 if succeed
  */
@@ -731,7 +730,8 @@ static void guc_client_free(struct drm_device *dev,
  * The kernel client to replace ExecList submission is created with
  * NORMAL priority. Priority of a client for scheduler can be HIGH,
  * while a preemption context can use CRITICAL.
- * @ctx	the context to own the client (we use the default render context)
+ * @ctx: the context that owns the client (we use the default render
+ * context)
  *
  * Return:	An i915_guc_client object if success.
  */
@@ -139,7 +139,8 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
 /*
  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
  */
-static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
+static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
+				    i915_reg_t reg)
 {
	u32 val = I915_READ(reg);
 
@@ -147,7 +148,7 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
		return;
 
	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
-	     reg, val);
+	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
@@ -283,17 +284,17 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
	ilk_update_gt_irq(dev_priv, mask, 0);
 }
 
-static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
+static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
 {
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
 }
 
-static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
+static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
 {
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
 }
 
-static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
+static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
 {
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
 }
@@ -350,7 +351,7 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 void gen6_reset_rps_interrupts(struct drm_device *dev)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t reg = gen6_pm_iir(dev_priv);
+	i915_reg_t reg = gen6_pm_iir(dev_priv);
 
	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
@@ -477,7 +478,7 @@ static void
 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
 {
-	u32 reg = PIPESTAT(pipe);
+	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
 
	assert_spin_locked(&dev_priv->irq_lock);
@@ -504,7 +505,7 @@ static void
 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
 {
-	u32 reg = PIPESTAT(pipe);
+	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
 
	assert_spin_locked(&dev_priv->irq_lock);
@@ -665,8 +666,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long high_frame;
-	unsigned long low_frame;
+	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
@@ -717,9 +717,7 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
 }
 
-/* raw reads, only for fast reads of display block, no need for forcewake etc. */
-#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
-
+/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 {
	struct drm_device *dev = crtc->base.dev;
@@ -733,9 +731,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
		vtotal /= 2;
 
	if (IS_GEN2(dev))
-		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
-		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
+		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
@@ -827,7 +825,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
-		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
 
		/* convert to pixel counts */
		vbl_start *= htotal;
@@ -1188,7 +1186,7 @@ static void ivybridge_parity_work(struct work_struct *work)
	POSTING_READ(GEN7_MISCCPCTL);
 
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
-		u32 reg;
+		i915_reg_t reg;
 
		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
@@ -1196,7 +1194,7 @@ static void ivybridge_parity_work(struct work_struct *work)
 
		dev_priv->l3_parity.which_slice &= ~(1<<slice);
 
-		reg = GEN7_L3CDERRST1 + (slice * 0x200);
+		reg = GEN7_L3CDERRST1(slice);
 
		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
@@ -1290,70 +1288,69 @@ static void snb_gt_irq_handler(struct drm_device *dev,
		ivybridge_parity_error_irq_handler(dev, gt_iir);
 }
 
+static __always_inline void
+gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
+{
+	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
+		notify_ring(ring);
+	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
+		intel_lrc_irq_handler(ring);
+}
+
 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				       u32 master_ctl)
 {
	irqreturn_t ret = IRQ_NONE;
 
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
-		u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
-		if (tmp) {
-			I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
+		u32 iir = I915_READ_FW(GEN8_GT_IIR(0));
+		if (iir) {
+			I915_WRITE_FW(GEN8_GT_IIR(0), iir);
			ret = IRQ_HANDLED;
 
-			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
-				intel_lrc_irq_handler(&dev_priv->ring[RCS]);
-			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
-				notify_ring(&dev_priv->ring[RCS]);
+			gen8_cs_irq_handler(&dev_priv->ring[RCS],
+					    iir, GEN8_RCS_IRQ_SHIFT);
 
-			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
-				intel_lrc_irq_handler(&dev_priv->ring[BCS]);
-			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
-				notify_ring(&dev_priv->ring[BCS]);
+			gen8_cs_irq_handler(&dev_priv->ring[BCS],
+					    iir, GEN8_BCS_IRQ_SHIFT);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}
 
	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
-		u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
-		if (tmp) {
-			I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
+		u32 iir = I915_READ_FW(GEN8_GT_IIR(1));
+		if (iir) {
+			I915_WRITE_FW(GEN8_GT_IIR(1), iir);
			ret = IRQ_HANDLED;
 
-			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
-				intel_lrc_irq_handler(&dev_priv->ring[VCS]);
-			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
-				notify_ring(&dev_priv->ring[VCS]);
+			gen8_cs_irq_handler(&dev_priv->ring[VCS],
+					    iir, GEN8_VCS1_IRQ_SHIFT);
 
-			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
-				intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
-			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
-				notify_ring(&dev_priv->ring[VCS2]);
+			gen8_cs_irq_handler(&dev_priv->ring[VCS2],
+					    iir, GEN8_VCS2_IRQ_SHIFT);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}
 
	if (master_ctl & GEN8_GT_VECS_IRQ) {
-		u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
-		if (tmp) {
-			I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
+		u32 iir = I915_READ_FW(GEN8_GT_IIR(3));
+		if (iir) {
+			I915_WRITE_FW(GEN8_GT_IIR(3), iir);
			ret = IRQ_HANDLED;
 
-			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
-				intel_lrc_irq_handler(&dev_priv->ring[VECS]);
-			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
-				notify_ring(&dev_priv->ring[VECS]);
+			gen8_cs_irq_handler(&dev_priv->ring[VECS],
+					    iir, GEN8_VECS_IRQ_SHIFT);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}
 
	if (master_ctl & GEN8_GT_PM_IRQ) {
-		u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
-		if (tmp & dev_priv->pm_rps_events) {
+		u32 iir = I915_READ_FW(GEN8_GT_IIR(2));
+		if (iir & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
-				      tmp & dev_priv->pm_rps_events);
+				      iir & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
-			gen6_rps_irq_handler(dev_priv, tmp);
+			gen6_rps_irq_handler(dev_priv, iir);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}
@@ -1625,7 +1622,7 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
 
	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
-		int reg;
+		i915_reg_t reg;
		u32 mask, iir_bit = 0;
 
		/*
@@ -2354,9 +2351,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
				spt_irq_handler(dev, pch_iir);
			else
				cpt_irq_handler(dev, pch_iir);
-		} else
-			DRM_ERROR("The master control interrupt lied (SDE)!\n");
-
+		} else {
+			/*
+			 * Like on previous PCH there seems to be something
+			 * fishy going on with forwarding PCH interrupts.
+			 */
+			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
+		}
	}
 
	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -3869,7 +3870,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
		DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
 
		for_each_pipe(dev_priv, pipe) {
-			int reg = PIPESTAT(pipe);
+			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);
 
			/*
@@ -4050,7 +4051,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
		DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
 
		for_each_pipe(dev_priv, pipe) {
-			int reg = PIPESTAT(pipe);
+			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);
 
			/* Clear the PIPE*STAT regs before the IIR */
@@ -4272,7 +4273,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
		DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
 
		for_each_pipe(dev_priv, pipe) {
-			int reg = PIPESTAT(pipe);
+			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);
 
			/*
@@ -32,6 +32,7 @@ struct i915_params i915 __read_mostly = {
	.panel_use_ssc = -1,
	.vbt_sdvo_panel_type = -1,
	.enable_rc6 = -1,
+	.enable_dc = -1,
	.enable_fbc = -1,
	.enable_execlists = -1,
	.enable_hangcheck = true,
@@ -80,6 +81,11 @@ MODULE_PARM_DESC(enable_rc6,
	"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
	"default: -1 (use per-chip default)");
 
+module_param_named_unsafe(enable_dc, i915.enable_dc, int, 0400);
+MODULE_PARM_DESC(enable_dc,
+	"Enable power-saving display C-states. "
+	"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
+
 module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600);
 MODULE_PARM_DESC(enable_fbc,
	"Enable frame buffer compression for power savings "
@@ -112,7 +118,7 @@ MODULE_PARM_DESC(enable_hangcheck,
 module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400);
 MODULE_PARM_DESC(enable_ppgtt,
	"Override PPGTT usage. "
-	"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
+	"(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
 
 module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400);
 MODULE_PARM_DESC(enable_execlists,
@@ -126,7 +132,7 @@ module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, i
 MODULE_PARM_DESC(preliminary_hw_support,
	"Enable preliminary hardware support.");
 
-module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0600);
+module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400);
 MODULE_PARM_DESC(disable_power_well,
	"Disable display power wells when possible "
	"(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
File diff suppressed because it is too large
@@ -35,7 +35,8 @@
 #define dev_to_drm_minor(d) dev_get_drvdata((d))
 
 #ifdef CONFIG_PM
-static u32 calc_residency(struct drm_device *dev, const u32 reg)
+static u32 calc_residency(struct drm_device *dev,
+			  i915_reg_t reg)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
@@ -664,7 +664,7 @@ TRACE_EVENT(i915_flip_complete,
 );
 
 TRACE_EVENT_CONDITION(i915_reg_rw,
-	TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace),
+	TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),
 
	TP_ARGS(write, reg, val, len, trace),
 
@@ -679,7 +679,7 @@ TRACE_EVENT_CONDITION(i915_reg_rw,
 
	TP_fast_assign(
		__entry->val = (u64)val;
-		__entry->reg = reg;
+		__entry->reg = i915_mmio_reg_offset(reg);
		__entry->write = write;
		__entry->len = len;
		),
@@ -69,13 +69,13 @@ void i915_check_vgpu(struct drm_device *dev)
	if (!IS_HASWELL(dev))
		return;
 
-	magic = readq(dev_priv->regs + vgtif_reg(magic));
+	magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
	if (magic != VGT_MAGIC)
		return;
 
	version = INTEL_VGT_IF_VERSION_ENCODE(
-		readw(dev_priv->regs + vgtif_reg(version_major)),
-		readw(dev_priv->regs + vgtif_reg(version_minor)));
+		__raw_i915_read16(dev_priv, vgtif_reg(version_major)),
+		__raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
	if (version != INTEL_VGT_IF_VERSION) {
		DRM_INFO("VGT interface version mismatch!\n");
		return;
@@ -92,14 +92,10 @@ struct vgt_if {
	uint32_t g2v_notify;
	uint32_t rsv6[7];
 
-	uint32_t pdp0_lo;
-	uint32_t pdp0_hi;
-	uint32_t pdp1_lo;
-	uint32_t pdp1_hi;
-	uint32_t pdp2_lo;
-	uint32_t pdp2_hi;
-	uint32_t pdp3_lo;
-	uint32_t pdp3_hi;
+	struct {
+		uint32_t lo;
+		uint32_t hi;
+	} pdp[4];
 
	uint32_t execlist_context_descriptor_lo;
	uint32_t execlist_context_descriptor_hi;
@@ -108,7 +104,7 @@ struct vgt_if {
 } __packed;
 
 #define vgtif_reg(x) \
-	(VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x)
+	_MMIO((VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x))
 
 /* vGPU display status to be used by the host side */
 #define VGT_DRV_DISPLAY_NOT_READY 0
@@ -94,6 +94,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
	__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
 
	crtc_state->update_pipe = false;
+	crtc_state->disable_lp_wm = false;
 
	return &crtc_state->base;
 }
@@ -205,8 +206,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
			 * but since this plane is unchanged just do the
			 * minimum required validation.
			 */
-			if (plane->type == DRM_PLANE_TYPE_PRIMARY)
-				intel_crtc->atomic.wait_for_flips = true;
			crtc_state->base.planes_changed = true;
		}
 
@@ -84,6 +84,7 @@ intel_plane_duplicate_state(struct drm_plane *plane)
	state = &intel_state->base;
 
	__drm_atomic_helper_plane_duplicate_state(plane, state);
+	intel_state->wait_req = NULL;
 
	return state;
 }
@@ -100,6 +101,7 @@ void
 intel_plane_destroy_state(struct drm_plane *plane,
			  struct drm_plane_state *state)
 {
+	WARN_ON(state && to_intel_plane_state(state)->wait_req);
	drm_atomic_helper_plane_destroy_state(plane, state);
 }
 
@@ -161,9 +161,9 @@ static bool audio_rate_need_prog(struct intel_crtc *crtc,
 }
 
 static bool intel_eld_uptodate(struct drm_connector *connector,
-			       int reg_eldv, uint32_t bits_eldv,
-			       int reg_elda, uint32_t bits_elda,
-			       int reg_edid)
+			       i915_reg_t reg_eldv, uint32_t bits_eldv,
+			       i915_reg_t reg_elda, uint32_t bits_elda,
+			       i915_reg_t reg_edid)
 {
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
@@ -364,8 +364,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
	enum port port = intel_dig_port->port;
	enum pipe pipe = intel_crtc->pipe;
	uint32_t tmp, eldv;
-	int aud_config;
-	int aud_cntrl_st2;
+	i915_reg_t aud_config, aud_cntrl_st2;
 
	DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
		      port_name(port), pipe_name(pipe));
@@ -416,10 +415,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
	uint32_t eldv;
	uint32_t tmp;
	int len, i;
-	int hdmiw_hdmiedid;
-	int aud_config;
-	int aud_cntl_st;
-	int aud_cntrl_st2;
+	i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
 
	DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
		      port_name(port), pipe_name(pipe), drm_eld_size(eld));
@@ -591,7 +587,7 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
	struct drm_i915_private *dev_priv = dev_to_i915(dev);
	u32 tmp;
 
-	if (!IS_SKYLAKE(dev_priv))
+	if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
		return;
 
	/*
@@ -642,10 +638,11 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
	u32 tmp;
	int n;
 
-	/* HSW, BDW SKL need this fix */
+	/* HSW, BDW, SKL, KBL need this fix */
	if (!IS_SKYLAKE(dev_priv) &&
-	    !IS_BROADWELL(dev_priv) &&
-	    !IS_HASWELL(dev_priv))
+	    !IS_KABYLAKE(dev_priv) &&
+	    !IS_BROADWELL(dev_priv) &&
+	    !IS_HASWELL(dev_priv))
		return 0;
 
	mutex_lock(&dev_priv->av_mutex);
@@ -50,7 +50,7 @@ struct intel_crt {
	 * encoder's enable/disable callbacks */
	struct intel_connector *connector;
	bool force_hotplug_required;
-	u32 adpa_reg;
+	i915_reg_t adpa_reg;
 };
 
 static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
@@ -480,12 +480,8 @@ intel_crt_load_detect(struct intel_crt *crt)
	uint32_t vsample;
	uint32_t vblank, vblank_start, vblank_end;
	uint32_t dsl;
-	uint32_t bclrpat_reg;
-	uint32_t vtotal_reg;
-	uint32_t vblank_reg;
-	uint32_t vsync_reg;
-	uint32_t pipeconf_reg;
-	uint32_t pipe_dsl_reg;
+	i915_reg_t bclrpat_reg, vtotal_reg,
+		vblank_reg, vsync_reg, pipeconf_reg, pipe_dsl_reg;
	uint8_t	st00;
	enum drm_connector_status status;
 
@@ -518,7 +514,7 @@ intel_crt_load_detect(struct intel_crt *crt)
		/* Wait for next Vblank to substitue
		 * border color for Color info */
		intel_wait_for_vblank(dev, pipe);
-		st00 = I915_READ8(VGA_MSR_WRITE);
+		st00 = I915_READ8(_VGA_MSR_WRITE);
		status = ((st00 & (1 << 4)) != 0) ?
			connector_status_connected :
			connector_status_disconnected;
@@ -563,7 +559,7 @@ intel_crt_load_detect(struct intel_crt *crt)
		do {
			count++;
			/* Read the ST00 VGA status register */
-			st00 = I915_READ8(VGA_MSR_WRITE);
+			st00 = I915_READ8(_VGA_MSR_WRITE);
			if (st00 & (1 << 4))
				detect++;
		} while ((I915_READ(pipe_dsl_reg) == dsl));
@ -47,21 +47,10 @@
|
||||
MODULE_FIRMWARE(I915_CSR_SKL);
|
||||
MODULE_FIRMWARE(I915_CSR_BXT);
|
||||
|
||||
/*
|
||||
* SKL CSR registers for DC5 and DC6
|
||||
*/
|
||||
#define CSR_PROGRAM(i) (0x80000 + (i) * 4)
|
||||
#define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0
|
||||
#define CSR_HTP_ADDR_SKL 0x00500034
|
||||
#define CSR_SSP_BASE 0x8F074
|
||||
#define CSR_HTP_SKL 0x8F004
|
||||
#define CSR_LAST_WRITE 0x8F034
|
||||
#define CSR_LAST_WRITE_VALUE 0xc003b400
|
||||
/* MMIO address range for CSR program (0x80000 - 0x82FFF) */
|
||||
#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
|
||||
|
||||
#define CSR_MAX_FW_SIZE 0x2FFF
|
||||
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
|
||||
#define CSR_MMIO_START_RANGE 0x80000
|
||||
#define CSR_MMIO_END_RANGE 0x8FFFF
|
||||
|
||||
struct intel_css_header {
|
||||
/* 0x09 for DMC */
|
||||
@ -178,166 +167,134 @@ struct stepping_info {
|
||||
};
|
||||
|
||||
static const struct stepping_info skl_stepping_info[] = {
|
||||
{'A', '0'}, {'B', '0'}, {'C', '0'},
|
||||
{'D', '0'}, {'E', '0'}, {'F', '0'},
|
||||
{'G', '0'}, {'H', '0'}, {'I', '0'}
|
||||
{'A', '0'}, {'B', '0'}, {'C', '0'},
|
||||
{'D', '0'}, {'E', '0'}, {'F', '0'},
|
||||
{'G', '0'}, {'H', '0'}, {'I', '0'}
|
||||
};
|
||||
|
||||
static struct stepping_info bxt_stepping_info[] = {
|
||||
static const struct stepping_info bxt_stepping_info[] = {
|
||||
{'A', '0'}, {'A', '1'}, {'A', '2'},
|
||||
{'B', '0'}, {'B', '1'}, {'B', '2'}
|
||||
};
|
||||
|
||||
static char intel_get_stepping(struct drm_device *dev)
|
||||
static const struct stepping_info *intel_get_stepping_info(struct drm_device *dev)
|
||||
{
|
||||
if (IS_SKYLAKE(dev) && (dev->pdev->revision <
|
||||
ARRAY_SIZE(skl_stepping_info)))
|
||||
return skl_stepping_info[dev->pdev->revision].stepping;
|
||||
else if (IS_BROXTON(dev) && (dev->pdev->revision <
|
||||
ARRAY_SIZE(bxt_stepping_info)))
|
||||
return bxt_stepping_info[dev->pdev->revision].stepping;
|
||||
else
|
||||
return -ENODATA;
|
||||
}
|
||||
const struct stepping_info *si;
|
||||
unsigned int size;
|
||||
|
||||
static char intel_get_substepping(struct drm_device *dev)
|
||||
{
|
||||
if (IS_SKYLAKE(dev) && (dev->pdev->revision <
|
||||
ARRAY_SIZE(skl_stepping_info)))
|
||||
return skl_stepping_info[dev->pdev->revision].substepping;
|
||||
else if (IS_BROXTON(dev) && (dev->pdev->revision <
|
||||
ARRAY_SIZE(bxt_stepping_info)))
|
||||
return bxt_stepping_info[dev->pdev->revision].substepping;
|
||||
else
|
||||
return -ENODATA;
|
||||
}
|
||||
if (IS_SKYLAKE(dev)) {
|
||||
size = ARRAY_SIZE(skl_stepping_info);
|
||||
si = skl_stepping_info;
|
||||
} else if (IS_BROXTON(dev)) {
|
||||
size = ARRAY_SIZE(bxt_stepping_info);
|
||||
si = bxt_stepping_info;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_csr_load_status_get() - to get firmware loading status.
|
||||
* @dev_priv: i915 device.
|
||||
*
|
||||
* This function helps to get the firmware loading status.
|
||||
*
|
||||
* Return: Firmware loading status.
|
||||
*/
|
||||
enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
enum csr_state state;
|
||||
if (INTEL_REVID(dev) < size)
|
||||
return si + INTEL_REVID(dev);
|
||||
|
||||
mutex_lock(&dev_priv->csr_lock);
|
||||
state = dev_priv->csr.state;
|
||||
mutex_unlock(&dev_priv->csr_lock);
|
||||
|
||||
return state;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_csr_load_status_set() - help to set firmware loading status.
|
||||
* @dev_priv: i915 device.
|
||||
* @state: enumeration of firmware loading status.
|
||||
*
|
||||
* Set the firmware loading status.
|
||||
*/
|
||||
void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
|
||||
enum csr_state state)
|
||||
{
|
||||
mutex_lock(&dev_priv->csr_lock);
|
||||
dev_priv->csr.state = state;
|
||||
mutex_unlock(&dev_priv->csr_lock);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_csr_load_program() - write the firmware from memory to register.
|
||||
* @dev: drm device.
|
||||
* @dev_priv: i915 drm device.
|
||||
*
|
||||
* CSR firmware is read from a .bin file and kept in internal memory one time.
|
||||
* Everytime display comes back from low power state this function is called to
|
||||
* copy the firmware from internal memory to registers.
|
||||
*/
|
||||
void intel_csr_load_program(struct drm_device *dev)
|
||||
void intel_csr_load_program(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 *payload = dev_priv->csr.dmc_payload;
|
||||
uint32_t i, fw_size;
|
||||
|
||||
if (!IS_GEN9(dev)) {
|
||||
if (!IS_GEN9(dev_priv)) {
|
||||
DRM_ERROR("No CSR support available for this platform\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* FIXME: Firmware gets lost on S3/S4, but not when entering system
|
||||
* standby or suspend-to-idle (which is just like forced runtime pm).
|
||||
* Unfortunately the ACPI subsystem doesn't yet give us a way to
|
||||
* differentiate this, hence figure it out with this hack.
|
||||
*/
|
||||
if (I915_READ(CSR_PROGRAM(0)))
|
||||
if (!dev_priv->csr.dmc_payload) {
|
||||
DRM_ERROR("Tried to program CSR with empty payload\n");
|
||||
return;
|
||||
}
|
||||
|
||||
mutex_lock(&dev_priv->csr_lock);
|
||||
fw_size = dev_priv->csr.dmc_fw_size;
|
||||
for (i = 0; i < fw_size; i++)
|
||||
I915_WRITE(CSR_PROGRAM(i), payload[i]);
|
||||
|
||||
for (i = 0; i < dev_priv->csr.mmio_count; i++) {
|
||||
I915_WRITE(dev_priv->csr.mmioaddr[i],
|
||||
dev_priv->csr.mmiodata[i]);
|
||||
dev_priv->csr.mmiodata[i]);
|
||||
}
|
||||
|
||||
dev_priv->csr.state = FW_LOADED;
|
||||
mutex_unlock(&dev_priv->csr_lock);
|
||||
}
|
||||
|
||||
static void finish_csr_load(const struct firmware *fw, void *context)
|
||||
static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
|
||||
const struct firmware *fw)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = context;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct intel_css_header *css_header;
|
||||
struct intel_package_header *package_header;
|
||||
struct intel_dmc_header *dmc_header;
|
||||
struct intel_csr *csr = &dev_priv->csr;
|
||||
char stepping = intel_get_stepping(dev);
|
||||
char substepping = intel_get_substepping(dev);
|
||||
const struct stepping_info *stepping_info = intel_get_stepping_info(dev);
|
||||
char stepping, substepping;
|
||||
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
|
||||
uint32_t i;
|
||||
uint32_t *dmc_payload;
|
||||
bool fw_loaded = false;
|
||||
|
||||
if (!fw) {
|
||||
i915_firmware_load_error_print(csr->fw_path, 0);
|
||||
goto out;
|
||||
}
|
||||
if (!fw)
|
||||
return NULL;
|
||||
|
||||
if ((stepping == -ENODATA) || (substepping == -ENODATA)) {
|
||||
if (!stepping_info) {
|
||||
DRM_ERROR("Unknown stepping info, firmware loading failed\n");
|
||||
goto out;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
stepping = stepping_info->stepping;
|
||||
substepping = stepping_info->substepping;
|
||||
|
||||
/* Extract CSS Header information*/
|
||||
css_header = (struct intel_css_header *)fw->data;
|
||||
if (sizeof(struct intel_css_header) !=
|
||||
(css_header->header_len * 4)) {
|
||||
(css_header->header_len * 4)) {
|
||||
DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
|
||||
(css_header->header_len * 4));
|
||||
goto out;
|
||||
(css_header->header_len * 4));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
csr->version = css_header->version;
|
||||
|
||||
if (IS_SKYLAKE(dev) && csr->version < SKL_CSR_VERSION_REQUIRED) {
|
||||
DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
|
||||
" please upgrade to v%u.%u or later"
|
||||
" [https://01.org/linuxgraphics/intel-linux-graphics-firmwares].\n",
|
||||
CSR_VERSION_MAJOR(csr->version),
|
||||
CSR_VERSION_MINOR(csr->version),
|
||||
CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
|
||||
CSR_VERSION_MINOR(SKL_CSR_VERSION_REQUIRED));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
readcount += sizeof(struct intel_css_header);
|
||||
|
||||
/* Extract Package Header information*/
|
||||
package_header = (struct intel_package_header *)
|
||||
&fw->data[readcount];
|
||||
&fw->data[readcount];
|
||||
if (sizeof(struct intel_package_header) !=
|
||||
(package_header->header_len * 4)) {
|
||||
(package_header->header_len * 4)) {
|
||||
DRM_ERROR("Firmware has wrong package header length %u bytes\n",
|
||||
(package_header->header_len * 4));
|
||||
goto out;
|
||||
(package_header->header_len * 4));
|
||||
return NULL;
|
||||
}
|
||||
readcount += sizeof(struct intel_package_header);
|
||||
|
||||
/* Search for dmc_offset to find firware binary. */
|
||||
for (i = 0; i < package_header->num_entries; i++) {
|
||||
if (package_header->fw_info[i].substepping == '*' &&
|
||||
stepping == package_header->fw_info[i].stepping) {
|
||||
stepping == package_header->fw_info[i].stepping) {
|
||||
dmc_offset = package_header->fw_info[i].offset;
|
||||
break;
|
||||
} else if (stepping == package_header->fw_info[i].stepping &&
|
||||
@ -345,12 +302,12 @@ static void finish_csr_load(const struct firmware *fw, void *context)
|
||||
dmc_offset = package_header->fw_info[i].offset;
|
||||
break;
|
||||
} else if (package_header->fw_info[i].stepping == '*' &&
|
||||
package_header->fw_info[i].substepping == '*')
|
||||
package_header->fw_info[i].substepping == '*')
|
||||
dmc_offset = package_header->fw_info[i].offset;
|
||||
}
|
||||
if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
|
||||
DRM_ERROR("Firmware not supported for %c stepping\n", stepping);
|
||||
goto out;
|
||||
return NULL;
|
||||
}
|
||||
readcount += dmc_offset;
|
||||
|
||||
@ -358,26 +315,26 @@ static void finish_csr_load(const struct firmware *fw, void *context)
|
||||
dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
|
||||
if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
|
||||
DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
|
||||
(dmc_header->header_len));
|
||||
goto out;
|
||||
(dmc_header->header_len));
|
||||
return NULL;
|
||||
}
|
||||
readcount += sizeof(struct intel_dmc_header);
|
||||
|
||||
/* Cache the dmc header info. */
|
||||
if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
|
||||
DRM_ERROR("Firmware has wrong mmio count %u\n",
|
||||
dmc_header->mmio_count);
|
||||
goto out;
|
||||
dmc_header->mmio_count);
|
||||
return NULL;
|
||||
}
|
||||
csr->mmio_count = dmc_header->mmio_count;
|
||||
for (i = 0; i < dmc_header->mmio_count; i++) {
|
||||
if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
|
||||
dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
|
||||
dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
|
||||
DRM_ERROR(" Firmware has wrong mmio address 0x%x\n",
|
||||
dmc_header->mmioaddr[i]);
|
||||
goto out;
|
||||
dmc_header->mmioaddr[i]);
|
||||
return NULL;
|
||||
}
|
||||
csr->mmioaddr[i] = dmc_header->mmioaddr[i];
|
||||
csr->mmioaddr[i] = _MMIO(dmc_header->mmioaddr[i]);
|
||||
csr->mmiodata[i] = dmc_header->mmiodata[i];
|
||||
}
|
||||
|
||||
@ -385,56 +342,80 @@ static void finish_csr_load(const struct firmware *fw, void *context)
|
||||
nbytes = dmc_header->fw_size * 4;
|
||||
if (nbytes > CSR_MAX_FW_SIZE) {
|
||||
DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
|
||||
goto out;
|
||||
return NULL;
|
||||
}
|
||||
csr->dmc_fw_size = dmc_header->fw_size;
|
||||
|
||||
csr->dmc_payload = kmalloc(nbytes, GFP_KERNEL);
|
||||
if (!csr->dmc_payload) {
|
||||
dmc_payload = kmalloc(nbytes, GFP_KERNEL);
|
||||
if (!dmc_payload) {
|
||||
DRM_ERROR("Memory allocation failed for dmc payload\n");
|
||||
goto out;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
dmc_payload = csr->dmc_payload;
|
||||
memcpy(dmc_payload, &fw->data[readcount], nbytes);
|
||||
|
||||
/* load csr program during system boot, as needed for DC states */
|
||||
intel_csr_load_program(dev);
|
||||
fw_loaded = true;
|
||||
return dmc_payload;
|
||||
}
|
||||
|
||||
static void csr_load_work_fn(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv;
|
||||
struct intel_csr *csr;
|
||||
const struct firmware *fw;
|
||||
int ret;
|
||||
|
||||
dev_priv = container_of(work, typeof(*dev_priv), csr.work);
|
||||
csr = &dev_priv->csr;
|
||||
|
||||
ret = request_firmware(&fw, dev_priv->csr.fw_path,
|
||||
&dev_priv->dev->pdev->dev);
|
||||
if (!fw)
|
||||
goto out;
|
||||
|
||||
dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
|
||||
if (!dev_priv->csr.dmc_payload)
|
||||
goto out;
|
||||
|
||||
/* load csr program during system boot, as needed for DC states */
|
||||
intel_csr_load_program(dev_priv);
|
||||
|
||||
DRM_DEBUG_KMS("Finished loading %s\n", dev_priv->csr.fw_path);
|
||||
out:
|
||||
if (fw_loaded)
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
else
|
||||
intel_csr_load_status_set(dev_priv, FW_FAILED);
|
||||
if (dev_priv->csr.dmc_payload) {
|
||||
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
|
||||
|
||||
DRM_INFO("Finished loading %s (v%u.%u)\n",
|
||||
dev_priv->csr.fw_path,
|
||||
CSR_VERSION_MAJOR(csr->version),
|
||||
CSR_VERSION_MINOR(csr->version));
|
||||
} else {
|
||||
DRM_ERROR("Failed to load DMC firmware, disabling rpm\n");
|
||||
}
|
||||
|
||||
release_firmware(fw);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_csr_ucode_init() - initialize the firmware loading.
|
||||
* @dev: drm device.
|
||||
* @dev_priv: i915 drm device.
|
||||
*
|
||||
* This function is called at the time of loading the display driver to read
|
||||
* firmware from a .bin file and copied into a internal memory.
|
||||
*/
void intel_csr_ucode_init(struct drm_device *dev)
void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_csr *csr = &dev_priv->csr;
int ret;

if (!HAS_CSR(dev))
INIT_WORK(&dev_priv->csr.work, csr_load_work_fn);

if (!HAS_CSR(dev_priv))
return;

if (IS_SKYLAKE(dev))
if (IS_SKYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT;
else {
DRM_ERROR("Unexpected: no known CSR firmware for platform\n");
intel_csr_load_status_set(dev_priv, FW_FAILED);
return;
}

@@ -444,43 +425,24 @@ void intel_csr_ucode_init(struct drm_device *dev)
* Obtain a runtime pm reference, until CSR is loaded,
* to avoid entering runtime-suspend.
*/
intel_runtime_pm_get(dev_priv);
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

/* CSR supported for platform, load firmware */
ret = request_firmware_nowait(THIS_MODULE, true, csr->fw_path,
&dev_priv->dev->pdev->dev,
GFP_KERNEL, dev_priv,
finish_csr_load);
if (ret) {
i915_firmware_load_error_print(csr->fw_path, ret);
intel_csr_load_status_set(dev_priv, FW_FAILED);
}
schedule_work(&dev_priv->csr.work);
}

/**
* intel_csr_ucode_fini() - unload the CSR firmware.
* @dev: drm device.
* @dev_priv: i915 drm device.
*
* Firmware unloading includes freeing the internal memory and resetting the
* firmware loading status.
*/
void intel_csr_ucode_fini(struct drm_device *dev)
void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;

if (!HAS_CSR(dev))
if (!HAS_CSR(dev_priv))
return;

intel_csr_load_status_set(dev_priv, FW_FAILED);
flush_work(&dev_priv->csr.work);

kfree(dev_priv->csr.dmc_payload);
}

void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
WARN_ONCE(intel_csr_load_status_get(dev_priv) != FW_LOADED,
"CSR is not loaded.\n");
WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
"CSR program storage start is NULL\n");
WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
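The rework above replaces the request_firmware_nowait() callback with a plain work item: init takes a display power reference and schedules the work, and the worker fetches the blob, parses it via parse_csr_fw(), programs the hardware, and drops the reference. A minimal sketch of that pattern under assumed names (struct my_device, my_parse_fw() and my_power_put() are illustrative placeholders, not i915 symbols):

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/workqueue.h>

struct my_device {
	struct device *dev;		/* device to charge the fw load to */
	const char *fw_path;		/* blob name under /lib/firmware */
	const void *payload;		/* parsed firmware, NULL on failure */
	struct work_struct fw_work;
};

static const void *my_parse_fw(struct my_device *md, const struct firmware *fw);
static void my_power_put(struct my_device *md);

static void my_fw_load_work_fn(struct work_struct *work)
{
	struct my_device *md = container_of(work, struct my_device, fw_work);
	const struct firmware *fw = NULL;

	/* request_firmware() may sleep waiting for userspace, hence the worker */
	if (request_firmware(&fw, md->fw_path, md->dev) == 0)
		md->payload = my_parse_fw(md, fw);	/* hypothetical parser */

	my_power_put(md);	/* drop the ref taken before scheduling */
	release_firmware(fw);	/* tolerates a NULL fw */
}

The caller pairs this with INIT_WORK(&md->fw_work, my_fw_load_work_fn) at init and then schedule_work(), mirroring the intel_csr_ucode_init() hunk above.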
@@ -133,12 +133,12 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
{ 0x00002016, 0x000000A0, 0x0 },
{ 0x00005012, 0x0000009B, 0x0 },
{ 0x00007011, 0x00000088, 0x0 },
{ 0x00009010, 0x000000C7, 0x0 },
{ 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x00002016, 0x0000009B, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x00007011, 0x000000C7, 0x0 },
{ 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x00002016, 0x000000DF, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 },
{ 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
};

/* Skylake U */
@@ -146,12 +146,12 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
{ 0x0000201B, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x00007011, 0x00000087, 0x0 },
{ 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x0000201B, 0x0000009D, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 },
{ 0x00007011, 0x000000C7, 0x0 },
{ 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x00002016, 0x00000088, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 },
{ 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
};

/* Skylake Y */
@@ -159,12 +159,12 @@ static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
{ 0x00000018, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x00007011, 0x00000087, 0x0 },
{ 0x80009010, 0x000000C7, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x80009010, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x00000018, 0x0000009D, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 },
{ 0x00007011, 0x000000C7, 0x0 },
{ 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x80007011, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x00000018, 0x00000088, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 },
{ 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
};

/*
@@ -345,7 +345,7 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
static bool
intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
{
return intel_dig_port->hdmi.hdmi_reg;
return i915_mmio_reg_valid(intel_dig_port->hdmi.hdmi_reg);
}
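The hdmi_reg test above is one of the final bits of the typesafe register work called out in the merge log. The idea, sketched below from the shape the diff implies rather than copied verbatim from i915_reg.h, is to wrap the register offset in a one-member struct so raw u32 offsets and registers can no longer be mixed up silently:

#include <linux/types.h>

/* Sketch of the typesafe register wrapper; close to, but not a verbatim
 * copy of, the i915 definitions. */
typedef struct {
	u32 reg;
} i915_reg_t;

#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

/* Register offset 0 doubles as the "no such register" sentinel, which is
 * why the HDMI hunk above asks this helper instead of testing a u32. */
static inline bool i915_mmio_reg_valid(i915_reg_t reg)
{
	return reg.reg != 0;
}

Passing a plain integer where an i915_reg_t is expected now fails to compile, which is what lets the hunks below migrate parameters such as hsw_ddi_calc_wrpll_link()'s reg from int to i915_reg_t with confidence.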

static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
@@ -448,7 +448,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
bxt_ddi_vswing_sequence(dev, hdmi_level, port,
INTEL_OUTPUT_HDMI);
return;
} else if (IS_SKYLAKE(dev)) {
} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
ddi_translations_fdi = NULL;
ddi_translations_dp =
skl_get_buf_trans_dp(dev, &n_dp_entries);
@@ -576,7 +576,7 @@ void intel_prepare_ddi(struct drm_device *dev)
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port)
{
uint32_t reg = DDI_BUF_CTL(port);
i915_reg_t reg = DDI_BUF_CTL(port);
int i;

for (i = 0; i < 16; i++) {
@@ -931,7 +931,8 @@ static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
/* Otherwise a < c && b >= d, do nothing */
}

static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg)
static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
int refclk = LC_FREQ;
int n, p, r;
@@ -967,7 +968,7 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg)
static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
uint32_t dpll)
{
uint32_t cfgcr1_reg, cfgcr2_reg;
i915_reg_t cfgcr1_reg, cfgcr2_reg;
uint32_t cfgcr1_val, cfgcr2_val;
uint32_t p0, p1, p2, dco_freq;

@@ -1112,10 +1113,10 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
link_clock = 270000;
break;
case PORT_CLK_SEL_WRPLL1:
link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(0));
break;
case PORT_CLK_SEL_WRPLL2:
link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1));
break;
case PORT_CLK_SEL_SPLL:
pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
@@ -1184,7 +1185,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,

if (INTEL_INFO(dev)->gen <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
else if (IS_SKYLAKE(dev))
else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
skl_ddi_clock_get(encoder, pipe_config);
else if (IS_BROXTON(dev))
bxt_ddi_clock_get(encoder, pipe_config);
@@ -1780,7 +1781,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(crtc_state);

if (IS_SKYLAKE(dev))
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
return skl_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
else if (IS_BROXTON(dev))
@@ -1942,7 +1943,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
uint32_t val = I915_READ(reg);

val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
@@ -2097,21 +2098,21 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
iboost = dp_iboost;
} else {
ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
iboost = ddi_translations[port].i_boost;
iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_EDP) {
if (dp_iboost) {
iboost = dp_iboost;
} else {
ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
iboost = ddi_translations[port].i_boost;
iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_HDMI) {
if (hdmi_iboost) {
iboost = hdmi_iboost;
} else {
ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
iboost = ddi_translations[port].i_boost;
iboost = ddi_translations[level].i_boost;
}
} else {
return;
@@ -2263,7 +2264,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)

level = translate_signal_level(signal_levels);

if (IS_SKYLAKE(dev))
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
skl_ddi_set_iboost(dev, level, port, encoder->type);
else if (IS_BROXTON(dev))
bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
@@ -2271,6 +2272,50 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
return DDI_BUF_TRANS_SELECT(level);
}

void intel_ddi_clk_select(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);

if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
uint32_t dpll = pipe_config->ddi_pll_sel;
uint32_t val;

/*
* DPLL0 is used for eDP and is the only "private" DPLL (as
* opposed to shared) on SKL
*/
if (encoder->type == INTEL_OUTPUT_EDP) {
WARN_ON(dpll != SKL_DPLL0);

val = I915_READ(DPLL_CTRL1);

val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
DPLL_CTRL1_SSC(dpll) |
DPLL_CTRL1_LINK_RATE_MASK(dpll));
val |= pipe_config->dpll_hw_state.ctrl1 << (dpll * 6);

I915_WRITE(DPLL_CTRL1, val);
POSTING_READ(DPLL_CTRL1);
}

/* DDI -> PLL mapping */
val = I915_READ(DPLL_CTRL2);

val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));

I915_WRITE(DPLL_CTRL2, val);

} else if (INTEL_INFO(dev_priv)->gen < 9) {
WARN_ON(pipe_config->ddi_pll_sel == PORT_CLK_SEL_NONE);
I915_WRITE(PORT_CLK_SEL(port), pipe_config->ddi_pll_sel);
}
}
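intel_ddi_clk_select() above centralizes the SKL/KBL DDI-to-PLL routing so other enable paths (the MST hunk further down, for instance) can reuse it instead of open-coding PORT_CLK_SEL writes. Its core is a read-modify-write on DPLL_CTRL2; a generic sketch of that pattern, with made-up field macros standing in for the real per-port bits:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative per-port bitfields, one nibble per port; these are NOT the
 * real DPLL_CTRL2 layout, just stand-ins for the pattern. */
#define MY_SEL_MASK(port)	(3u << ((port) * 4))
#define MY_SEL(pll, port)	(((pll) & 3u) << ((port) * 4))
#define MY_OVERRIDE(port)	(1u << ((port) * 4 + 2))
#define MY_CLK_OFF(port)	(1u << ((port) * 4 + 3))

static void my_clk_select(void __iomem *ctrl2, unsigned int port, u32 pll)
{
	u32 val = readl(ctrl2);				/* current routing */

	val &= ~(MY_CLK_OFF(port) | MY_SEL_MASK(port));	/* clear this port */
	val |= MY_SEL(pll, port) | MY_OVERRIDE(port);	/* route and override */

	writel(val, ctrl2);
	readl(ctrl2);					/* posting read to flush */
}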

static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
@@ -2286,42 +2331,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
intel_edp_panel_on(intel_dp);
}

if (IS_SKYLAKE(dev)) {
uint32_t dpll = crtc->config->ddi_pll_sel;
uint32_t val;

/*
* DPLL0 is used for eDP and is the only "private" DPLL (as
* opposed to shared) on SKL
*/
if (type == INTEL_OUTPUT_EDP) {
WARN_ON(dpll != SKL_DPLL0);

val = I915_READ(DPLL_CTRL1);

val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
DPLL_CTRL1_SSC(dpll) |
DPLL_CTRL1_LINK_RATE_MASK(dpll));
val |= crtc->config->dpll_hw_state.ctrl1 << (dpll * 6);

I915_WRITE(DPLL_CTRL1, val);
POSTING_READ(DPLL_CTRL1);
}

/* DDI -> PLL mapping */
val = I915_READ(DPLL_CTRL2);

val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));

I915_WRITE(DPLL_CTRL2, val);

} else if (INTEL_INFO(dev)->gen < 9) {
WARN_ON(crtc->config->ddi_pll_sel == PORT_CLK_SEL_NONE);
I915_WRITE(PORT_CLK_SEL(port), crtc->config->ddi_pll_sel);
}
intel_ddi_clk_select(intel_encoder, crtc->config);

if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2381,7 +2391,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
intel_edp_panel_off(intel_dp);
}

if (IS_SKYLAKE(dev))
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port)));
else if (INTEL_INFO(dev)->gen < 9)
@@ -2553,7 +2563,7 @@ static const char * const skl_ddi_pll_names[] = {
};

struct skl_dpll_regs {
u32 ctl, cfgcr1, cfgcr2;
i915_reg_t ctl, cfgcr1, cfgcr2;
};

/* this array is indexed by the *shared* pll id */
@@ -2566,13 +2576,13 @@ static const struct skl_dpll_regs skl_dpll_regs[3] = {
},
{
/* DPLL 2 */
.ctl = WRPLL_CTL1,
.ctl = WRPLL_CTL(0),
.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
},
{
/* DPLL 3 */
.ctl = WRPLL_CTL2,
.ctl = WRPLL_CTL(1),
.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
},
@@ -2992,22 +3002,22 @@ void intel_ddi_pll_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL);

if (IS_SKYLAKE(dev))
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
skl_shared_dplls_init(dev_priv);
else if (IS_BROXTON(dev))
bxt_shared_dplls_init(dev_priv);
else
hsw_shared_dplls_init(dev_priv);

if (IS_SKYLAKE(dev)) {
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
int cdclk_freq;

cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
dev_priv->skl_boot_cdclk = cdclk_freq;
if (skl_sanitize_cdclk(dev_priv))
DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
DRM_ERROR("LCPLL1 is disabled\n");
else
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
} else if (IS_BROXTON(dev)) {
broxton_init_cdclk(dev);
broxton_ddi_phy_init(dev);
@@ -3026,11 +3036,11 @@ void intel_ddi_pll_init(struct drm_device *dev)
}
}

void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
enum port port = intel_dig_port->port;
uint32_t val;
bool wait = false;
@@ -3289,6 +3299,20 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
(DDI_BUF_PORT_REVERSAL |
DDI_A_4_LANES);

/*
* Bspec says that DDI_A_4_LANES is the only supported configuration
* for Broxton. Yet some BIOS fail to set this bit on port A if eDP
* wasn't lit up at boot. Force this bit on in our internal
* configuration so that we use the proper lane count for our
* calculations.
*/
if (IS_BROXTON(dev) && port == PORT_A) {
if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
}
}

intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
@@ -3302,8 +3326,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
*/
if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0)
&& port == PORT_B)
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) && port == PORT_B)
dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
else
dev_priv->hotplug.irq_port[port] = intel_dig_port;
(File diff suppressed because it is too large)
(File diff suppressed because it is too large)
drivers/gpu/drm/i915/intel_dp_link_training.c: 323 lines (new file)
@@ -0,0 +1,323 @@
/*
* Copyright © 2008-2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/

#include "intel_drv.h"

static void
intel_get_adjust_train(struct intel_dp *intel_dp,
const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
uint8_t v = 0;
uint8_t p = 0;
int lane;
uint8_t voltage_max;
uint8_t preemph_max;

for (lane = 0; lane < intel_dp->lane_count; lane++) {
uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);

if (this_v > v)
v = this_v;
if (this_p > p)
p = this_p;
}

voltage_max = intel_dp_voltage_max(intel_dp);
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
if (p >= preemph_max)
p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
}

static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint8_t dp_train_pat)
{
uint8_t buf[sizeof(intel_dp->train_set) + 1];
int ret, len;

intel_dp_program_link_training_pattern(intel_dp, dp_train_pat);

buf[0] = dp_train_pat;
if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
DP_TRAINING_PATTERN_DISABLE) {
/* don't write DP_TRAINING_LANEx_SET on disable */
len = 1;
} else {
/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
len = intel_dp->lane_count + 1;
}

ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
buf, len);

return ret == len;
}

static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
uint8_t dp_train_pat)
{
if (!intel_dp->train_set_valid)
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp);
return intel_dp_set_link_train(intel_dp, dp_train_pat);
}

static bool
intel_dp_update_link_train(struct intel_dp *intel_dp)
{
int ret;

intel_dp_set_signal_levels(intel_dp);

ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
intel_dp->train_set, intel_dp->lane_count);

return ret == intel_dp->lane_count;
}

/* Enable corresponding port and start training pattern 1 */
static void
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
int i;
uint8_t voltage;
int voltage_tries, loop_tries;
uint8_t link_config[2];
uint8_t link_bw, rate_select;

if (intel_dp->prepare_link_retrain)
intel_dp->prepare_link_retrain(intel_dp);

intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
&link_bw, &rate_select);

/* Write the link configuration data */
link_config[0] = link_bw;
link_config[1] = intel_dp->lane_count;
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
if (intel_dp->num_sink_rates)
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
&rate_select, 1);

link_config[0] = 0;
link_config[1] = DP_SET_ANSI_8B10B;
drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

intel_dp->DP |= DP_PORT_EN;

/* clock recovery */
if (!intel_dp_reset_link_train(intel_dp,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to enable link training\n");
return;
}

voltage = 0xff;
voltage_tries = 0;
loop_tries = 0;
for (;;) {
uint8_t link_status[DP_LINK_STATUS_SIZE];

drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}

if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("clock recovery OK\n");
break;
}

/*
* if we used previously trained voltage and pre-emphasis values
* and we don't get clock recovery, reset link training values
*/
if (intel_dp->train_set_valid) {
DRM_DEBUG_KMS("clock recovery not ok, reset");
/* clear the flag as we are not reusing train set */
intel_dp->train_set_valid = false;
if (!intel_dp_reset_link_train(intel_dp,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to enable link training\n");
return;
}
continue;
}

/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == intel_dp->lane_count) {
++loop_tries;
if (loop_tries == 5) {
DRM_ERROR("too many full retries, give up\n");
break;
}
intel_dp_reset_link_train(intel_dp,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE);
voltage_tries = 0;
continue;
}

/* Check to see if we've tried the same voltage 5 times */
if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++voltage_tries;
if (voltage_tries == 5) {
DRM_ERROR("too many voltage retries, give up\n");
break;
}
} else
voltage_tries = 0;
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

/* Update training set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
DRM_ERROR("failed to update link training\n");
break;
}
}
}

static void
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
bool channel_eq = false;
int tries, cr_tries;
uint32_t training_pattern = DP_TRAINING_PATTERN_2;

/*
* Training Pattern 3 for HBR2 or 1.2 devices that support it.
*
* Intel platforms that support HBR2 also support TPS3. TPS3 support is
* also mandatory for downstream devices that support HBR2.
*
* Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
* supported but still not enabled.
*/
if (intel_dp_source_supports_hbr2(intel_dp) &&
drm_dp_tps3_supported(intel_dp->dpcd))
training_pattern = DP_TRAINING_PATTERN_3;
else if (intel_dp->link_rate == 540000)
DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");

/* channel equalization */
if (!intel_dp_set_link_train(intel_dp,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to start channel equalization\n");
return;
}

tries = 0;
cr_tries = 0;
channel_eq = false;
for (;;) {
uint8_t link_status[DP_LINK_STATUS_SIZE];

if (cr_tries > 5) {
DRM_ERROR("failed to train DP, aborting\n");
break;
}

drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}

/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status,
intel_dp->lane_count)) {
intel_dp->train_set_valid = false;
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
cr_tries++;
continue;
}

if (drm_dp_channel_eq_ok(link_status,
intel_dp->lane_count)) {
channel_eq = true;
break;
}

/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
intel_dp->train_set_valid = false;
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
tries = 0;
cr_tries++;
continue;
}

/* Update training set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
DRM_ERROR("failed to update link training\n");
break;
}
++tries;
}

intel_dp_set_idle_link_train(intel_dp);

if (channel_eq) {
intel_dp->train_set_valid = true;
DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
}
}

void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
intel_dp_set_link_train(intel_dp,
DP_TRAINING_PATTERN_DISABLE);
}

void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_link_training_channel_equalization(intel_dp);
}
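The new file ends by composing the two DP-mandated phases: clock recovery on TPS1, then channel equalization on TPS2/TPS3. Callers drive it as a start/stop pair, which is exactly what the MST hunk below does; a condensed usage sketch (error handling omitted):

/* Usage sketch only; it mirrors the call sequence visible in the MST hunk. */
static void my_enable_dp_link(struct intel_dp *intel_dp)
{
	/* Phase 1: clock recovery (TPS1), phase 2: channel EQ (TPS2/3) */
	intel_dp_start_link_train(intel_dp);

	/* Leave the training pattern; normal scrambled video resumes */
	intel_dp_stop_link_train(intel_dp);
}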
@@ -173,20 +173,14 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
intel_mst->port = found->port;

if (intel_dp->active_mst_links == 0) {
enum port port = intel_ddi_get_encoder_port(encoder);
intel_ddi_clk_select(encoder, intel_crtc->config);

intel_dp_set_link_params(intel_dp, intel_crtc->config);

/* FIXME: add support for SKL */
if (INTEL_INFO(dev)->gen < 9)
I915_WRITE(PORT_CLK_SEL(port),
intel_crtc->config->ddi_pll_sel);

intel_ddi_init_dp_buf_reg(&intel_dig_port->base);

intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);

intel_dp_start_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
@@ -414,7 +408,10 @@ static void intel_connector_add_to_fbdev(struct intel_connector *connector)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);

if (dev_priv->fbdev)
drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper,
&connector->base);
#endif
}

@@ -422,7 +419,10 @@ static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);

if (dev_priv->fbdev)
drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
&connector->base);
#endif
}
@@ -248,6 +248,7 @@ struct intel_atomic_state {
unsigned int cdclk;
bool dpll_set;
struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
struct intel_wm_config wm_config;
};

struct intel_plane_state {
@@ -278,6 +279,9 @@ struct intel_plane_state {
int scaler_id;

struct drm_intel_sprite_colorkey ckey;

/* async flip related structures */
struct drm_i915_gem_request *wait_req;
};

struct intel_initial_plane_config {
@@ -332,6 +336,21 @@ struct intel_crtc_scaler_state {
/* drm_mode->private_flags */
#define I915_MODE_FLAG_INHERITED 1

struct intel_pipe_wm {
struct intel_wm_level wm[5];
uint32_t linetime;
bool fbc_wm_enabled;
bool pipe_enabled;
bool sprites_enabled;
bool sprites_scaled;
};

struct skl_pipe_wm {
struct skl_wm_level wm[8];
struct skl_wm_level trans_wm;
uint32_t linetime;
};

struct intel_crtc_state {
struct drm_crtc_state base;

@@ -466,6 +485,20 @@ struct intel_crtc_state {

/* w/a for waiting 2 vblanks during crtc enable */
enum pipe hsw_workaround_pipe;

/* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */
bool disable_lp_wm;

struct {
/*
* optimal watermarks, programmed post-vblank when this state
* is committed
*/
union {
struct intel_pipe_wm ilk;
struct skl_pipe_wm skl;
} optimal;
} wm;
};

struct vlv_wm_state {
@@ -477,26 +510,12 @@ struct vlv_wm_state {
bool cxsr;
};

struct intel_pipe_wm {
struct intel_wm_level wm[5];
uint32_t linetime;
bool fbc_wm_enabled;
bool pipe_enabled;
bool sprites_enabled;
bool sprites_scaled;
};

struct intel_mmio_flip {
struct work_struct work;
struct drm_i915_private *i915;
struct drm_i915_gem_request *req;
struct intel_crtc *crtc;
};

struct skl_pipe_wm {
struct skl_wm_level wm[8];
struct skl_wm_level trans_wm;
uint32_t linetime;
unsigned int rotation;
};

/*
@@ -507,13 +526,11 @@ struct skl_pipe_wm {
*/
struct intel_crtc_atomic_commit {
/* Sleepable operations to perform before commit */
bool wait_for_flips;
bool disable_fbc;
bool disable_ips;
bool disable_cxsr;
bool pre_disable_primary;
bool update_wm_pre, update_wm_post;
unsigned disabled_planes;

/* Sleepable operations to perform after commit */
unsigned fb_bits;
@@ -566,9 +583,10 @@ struct intel_crtc {
/* per-pipe watermark state */
struct {
/* watermarks currently being used */
struct intel_pipe_wm active;
/* SKL wm values currently in use */
struct skl_pipe_wm skl_active;
union {
struct intel_pipe_wm ilk;
struct skl_pipe_wm skl;
} active;
/* allow CxSR on this pipe */
bool cxsr_allowed;
} wm;
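The intel_crtc_state and intel_crtc hunks above fold the ILK-style and SKL-style watermark structs into unions keyed by platform, instead of carrying both side by side. An illustrative sketch of the access pattern this enables (my_crtc_wm and the helper are made up; only the field names echo the diff):

#include <linux/types.h>

struct my_crtc_wm {
	union {
		struct { u32 linetime; } ilk;	/* pre-gen9 view */
		struct { u32 linetime; } skl;	/* gen9+ view */
	} active;				/* values currently in use */
};

static inline u32 my_active_linetime(const struct my_crtc_wm *wm, bool gen9)
{
	return gen9 ? wm->active.skl.linetime : wm->active.ilk.linetime;
}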
@@ -676,7 +694,7 @@ struct cxsr_latency {
#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)

struct intel_hdmi {
u32 hdmi_reg;
i915_reg_t hdmi_reg;
int ddc_bus;
bool limited_color_range;
bool color_range_auto;
@@ -718,15 +736,10 @@ enum link_m_n_set {
M2_N2
};

struct sink_crc {
bool started;
u8 last_crc[6];
int last_count;
};

struct intel_dp {
uint32_t output_reg;
uint32_t aux_ch_ctl_reg;
i915_reg_t output_reg;
i915_reg_t aux_ch_ctl_reg;
i915_reg_t aux_ch_data_reg[5];
uint32_t DP;
int link_rate;
uint8_t lane_count;
@@ -740,7 +753,6 @@ struct intel_dp {
/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
uint8_t num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
struct sink_crc sink_crc;
struct drm_dp_aux aux;
uint8_t train_set[4];
int panel_power_up_delay;
@@ -782,6 +794,10 @@ struct intel_dp {
bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider);

/* This is called before link training is started */
void (*prepare_link_retrain)(struct intel_dp *intel_dp);

bool train_set_valid;

/* Displayport compliance testing */
@@ -941,7 +957,8 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe);
void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder);
void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv);
void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);

/* i915_irq.c */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
@@ -972,6 +989,8 @@ void intel_crt_init(struct drm_device *dev);

/* intel_ddi.c */
void intel_ddi_clk_select(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
void intel_prepare_ddi(struct drm_device *dev);
void hsw_fdi_link_train(struct drm_crtc *crtc);
void intel_ddi_init(struct drm_device *dev, enum port port);
@@ -986,7 +1005,7 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
bool intel_ddi_pll_select(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_fdi_disable(struct drm_crtc *crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -1054,6 +1073,15 @@ intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
drm_wait_one_vblank(dev, pipe);
}
static inline void
intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
{
const struct intel_crtc *crtc =
to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));

if (crtc->active)
intel_wait_for_vblank(dev, pipe);
}
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport,
@@ -1067,9 +1095,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx);
int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state,
struct intel_engine_cs *pipelined,
struct drm_i915_gem_request **pipelined_request);
const struct drm_plane_state *plane_state);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1150,7 +1176,10 @@ void broxton_ddi_phy_uninit(struct drm_device *dev);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
void skl_init_cdclk(struct drm_i915_private *dev_priv);
int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
void skl_enable_dc6(struct drm_i915_private *dev_priv);
void skl_disable_dc6(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
@@ -1167,33 +1196,30 @@ void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);

int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);

unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
struct drm_i915_gem_object *obj,
unsigned int plane);
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
struct drm_i915_gem_object *obj,
unsigned int plane);

u32 skl_plane_ctl_format(uint32_t pixel_format);
u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
u32 skl_plane_ctl_rotation(unsigned int rotation);

/* intel_csr.c */
void intel_csr_ucode_init(struct drm_device *dev);
enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv);
void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
enum csr_state state);
void intel_csr_load_program(struct drm_device *dev);
void intel_csr_ucode_fini(struct drm_device *dev);
void assert_csr_loaded(struct drm_i915_private *dev_priv);
void intel_csr_ucode_init(struct drm_i915_private *);
void intel_csr_load_program(struct drm_i915_private *);
void intel_csr_ucode_fini(struct drm_i915_private *);

/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
@@ -1231,6 +1257,22 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port);
void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);

void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
uint8_t dp_train_pat);
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp);
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp);
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
uint8_t *link_bw, uint8_t *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);

/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
@@ -1245,7 +1287,7 @@ void intel_dvo_init(struct drm_device *dev);
/* legacy fbdev emulation in intel_fbdev.c */
#ifdef CONFIG_DRM_FBDEV_EMULATION
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
extern void intel_fbdev_initial_config_async(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
@@ -1256,7 +1298,7 @@ static inline int intel_fbdev_init(struct drm_device *dev)
return 0;
}

static inline void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
{
}

@@ -1284,11 +1326,10 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
enum fb_op_origin origin);
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin);
const char *intel_no_fbc_reason_str(enum no_fbc_reason reason);
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);

/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
void intel_hdmi_init(struct drm_device *dev, i915_reg_t hdmi_reg, enum port port);
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
@@ -1364,7 +1405,10 @@ void intel_psr_single_frame_update(struct drm_device *dev,
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_fini(struct drm_i915_private *);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv);
void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);

bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
@@ -1375,8 +1419,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
@@ -1394,12 +1436,6 @@ void intel_init_clock_gating(struct drm_device *dev);
void intel_suspend_hw(struct drm_device *dev);
int ilk_wm_max_level(const struct drm_device *dev);
void intel_update_watermarks(struct drm_crtc *crtc);
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width,
uint32_t sprite_height,
int pixel_size,
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
void intel_pm_setup(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@@ -1427,7 +1463,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);

/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
bool intel_sdvo_init(struct drm_device *dev,
i915_reg_t reg, enum port port);

/* intel_sprite.c */
@@ -60,7 +60,8 @@ static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
DRM_ERROR("DPI FIFOs are not empty\n");
}

static void write_data(struct drm_i915_private *dev_priv, u32 reg,
static void write_data(struct drm_i915_private *dev_priv,
i915_reg_t reg,
const u8 *data, u32 len)
{
u32 i, j;
@@ -75,7 +76,8 @@ static void write_data(struct drm_i915_private *dev_priv, u32 reg,
}
}

static void read_data(struct drm_i915_private *dev_priv, u32 reg,
static void read_data(struct drm_i915_private *dev_priv,
i915_reg_t reg,
u8 *data, u32 len)
{
u32 i, j;
@@ -98,7 +100,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
struct mipi_dsi_packet packet;
ssize_t ret;
const u8 *header, *data;
u32 data_reg, data_mask, ctrl_reg, ctrl_mask;
i915_reg_t data_reg, ctrl_reg;
u32 data_mask, ctrl_mask;

ret = mipi_dsi_create_packet(&packet, msg);
if (ret < 0)
@@ -377,10 +380,10 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 temp;
u32 port_ctrl;

if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
u32 temp;

temp = I915_READ(VLV_CHICKEN_3);
temp &= ~PIXEL_OVERLAP_CNT_MASK |
intel_dsi->pixel_overlap <<
@@ -389,8 +392,9 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
}

for_each_dsi_port(port, intel_dsi->ports) {
port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
MIPI_PORT_CTRL(port);
i915_reg_t port_ctrl = IS_BROXTON(dev) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;

temp = I915_READ(port_ctrl);

@@ -416,13 +420,13 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 temp;
u32 port_ctrl;

for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t port_ctrl = IS_BROXTON(dev) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;

/* de-assert ip_tg_enable signal */
port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
MIPI_PORT_CTRL(port);
temp = I915_READ(port_ctrl);
I915_WRITE(port_ctrl, temp & ~DPI_ENABLE);
POSTING_READ(port_ctrl);
@@ -580,11 +584,13 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
u32 port_ctrl = 0;

DRM_DEBUG_KMS("\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
i915_reg_t port_ctrl = IS_BROXTON(dev) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
u32 val;

I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
ULPS_STATE_ENTER);
@@ -598,12 +604,6 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
ULPS_STATE_ENTER);
usleep_range(2000, 2500);

if (IS_BROXTON(dev))
port_ctrl = BXT_MIPI_PORT_CTRL(port);
else if (IS_VALLEYVIEW(dev))
/* Common bit for both MIPI Port A & MIPI Port C */
port_ctrl = MIPI_PORT_CTRL(PORT_A);

/* Wait till Clock lanes are in LP-00 state for MIPI Port A
* only. MIPI Port C has no similar bit for checking
*/
@@ -656,7 +656,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
u32 dpi_enabled, func, ctrl_reg;
enum port port;

DRM_DEBUG_KMS("\n");
@@ -667,9 +666,11 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,

/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 dpi_enabled, func;

func = I915_READ(MIPI_DSI_FUNC_PRG(port));
ctrl_reg = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
MIPI_PORT_CTRL(port);
dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE;

/* Due to some hardware limitations on BYT, MIPI Port C DPI

@@ -44,6 +44,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.type = INTEL_DVO_CHIP_TMDS,
.name = "sil164",
.dvo_reg = DVOC,
.dvo_srcdim_reg = DVOC_SRCDIM,
.slave_addr = SIL164_ADDR,
.dev_ops = &sil164_ops,
},
@@ -51,6 +52,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx",
.dvo_reg = DVOC,
.dvo_srcdim_reg = DVOC_SRCDIM,
.slave_addr = CH7xxx_ADDR,
.dev_ops = &ch7xxx_ops,
},
@@ -58,6 +60,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx",
.dvo_reg = DVOC,
.dvo_srcdim_reg = DVOC_SRCDIM,
.slave_addr = 0x75, /* For some ch7010 */
.dev_ops = &ch7xxx_ops,
},
@@ -65,6 +68,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.type = INTEL_DVO_CHIP_LVDS,
.name = "ivch",
.dvo_reg = DVOA,
.dvo_srcdim_reg = DVOA_SRCDIM,
.slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
.dev_ops = &ivch_ops,
},
@@ -72,6 +76,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.type = INTEL_DVO_CHIP_TMDS,
.name = "tfp410",
.dvo_reg = DVOC,
.dvo_srcdim_reg = DVOC_SRCDIM,
.slave_addr = TFP410_ADDR,
.dev_ops = &tfp410_ops,
},
@@ -79,6 +84,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.type = INTEL_DVO_CHIP_LVDS,
.name = "ch7017",
.dvo_reg = DVOC,
.dvo_srcdim_reg = DVOC_SRCDIM,
.slave_addr = 0x75,
.gpio = GMBUS_PIN_DPB,
.dev_ops = &ch7017_ops,
@@ -87,6 +93,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.type = INTEL_DVO_CHIP_TMDS,
.name = "ns2501",
.dvo_reg = DVOB,
.dvo_srcdim_reg = DVOB_SRCDIM,
.slave_addr = NS2501_ADDR,
.dev_ops = &ns2501_ops,
}
@@ -171,7 +178,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);

intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
@@ -184,7 +191,7 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);

intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
@@ -255,20 +262,8 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder)
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
int pipe = crtc->pipe;
u32 dvo_val;
u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;

switch (dvo_reg) {
case DVOA:
default:
dvo_srcdim_reg = DVOA_SRCDIM;
break;
case DVOB:
dvo_srcdim_reg = DVOB_SRCDIM;
break;
case DVOC:
dvo_srcdim_reg = DVOC_SRCDIM;
break;
}
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg;

/* Save the data order, since I don't know what it should be set to. */
dvo_val = I915_READ(dvo_reg) &

@@ -46,6 +46,11 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv)
return dev_priv->fbc.enable_fbc != NULL;
}

static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
{
return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
}

/*
* In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
* frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
@@ -182,7 +187,8 @@ static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
POSTING_READ(MSG_FBC_REND_STATE);
@@ -231,7 +237,7 @@ static void ilk_fbc_enable(struct intel_crtc *crtc)
I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
}

intel_fbc_nuke(dev_priv);
intel_fbc_recompress(dev_priv);

DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
@@ -310,7 +316,7 @@ static void gen7_fbc_enable(struct intel_crtc *crtc)
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));

intel_fbc_nuke(dev_priv);
intel_fbc_recompress(dev_priv);

DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
@@ -370,8 +376,6 @@ static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
if (dev_priv->fbc.fbc_work == NULL)
return;

DRM_DEBUG_KMS("cancelling pending FBC enable\n");

/* Synchronisation is provided by struct_mutex and checking of
* dev_priv->fbc.fbc_work, so we can perform the cancellation
* entirely asynchronously.
@@ -432,7 +436,8 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)

intel_fbc_cancel_work(dev_priv);

dev_priv->fbc.disable_fbc(dev_priv);
if (dev_priv->fbc.enabled)
dev_priv->fbc.disable_fbc(dev_priv);
dev_priv->fbc.crtc = NULL;
}

@@ -471,78 +476,45 @@ void intel_fbc_disable_crtc(struct intel_crtc *crtc)
mutex_unlock(&dev_priv->fbc.lock);
}

const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
{
switch (reason) {
case FBC_OK:
return "FBC enabled but currently disabled in hardware";
case FBC_UNSUPPORTED:
return "unsupported by this chipset";
case FBC_NO_OUTPUT:
return "no output";
case FBC_STOLEN_TOO_SMALL:
return "not enough stolen memory";
case FBC_UNSUPPORTED_MODE:
return "mode incompatible with compression";
case FBC_MODE_TOO_LARGE:
return "mode too large for compression";
case FBC_BAD_PLANE:
return "FBC unsupported on plane";
case FBC_NOT_TILED:
return "framebuffer not tiled or fenced";
case FBC_MULTIPLE_PIPES:
return "more than one pipe active";
case FBC_MODULE_PARAM:
return "disabled per module param";
case FBC_CHIP_DEFAULT:
return "disabled per chip default";
case FBC_ROTATION:
return "rotation unsupported";
case FBC_IN_DBG_MASTER:
return "Kernel debugger is active";
case FBC_BAD_STRIDE:
return "framebuffer stride not supported";
case FBC_PIXEL_RATE:
return "pixel rate is too big";
case FBC_PIXEL_FORMAT:
return "pixel format is invalid";
default:
MISSING_CASE(reason);
return "unknown reason";
}
}

static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
enum no_fbc_reason reason)
const char *reason)
{
if (dev_priv->fbc.no_fbc_reason == reason)
return;

dev_priv->fbc.no_fbc_reason = reason;
DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason));
DRM_DEBUG_KMS("Disabling FBC: %s\n", reason);
}
|
||||
static bool crtc_is_valid(struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
|
||||
|
||||
if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
|
||||
return false;
|
||||
|
||||
if (!intel_crtc_active(&crtc->base))
|
||||
return false;
|
||||
|
||||
if (!to_intel_plane_state(crtc->base.primary->state)->visible)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_crtc *crtc = NULL, *tmp_crtc;
|
||||
enum pipe pipe;
|
||||
bool pipe_a_only = false;
|
||||
|
||||
if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
|
||||
pipe_a_only = true;
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
|
||||
|
||||
if (intel_crtc_active(tmp_crtc) &&
|
||||
to_intel_plane_state(tmp_crtc->primary->state)->visible)
|
||||
if (crtc_is_valid(to_intel_crtc(tmp_crtc)))
|
||||
crtc = tmp_crtc;
|
||||
|
||||
if (pipe_a_only)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!crtc || crtc->primary->fb == NULL)
|
||||
if (!crtc)
|
||||
return NULL;
|
||||
|
||||
return crtc;
|
||||
@ -581,7 +553,8 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
|
||||
* reserved range size, so it always assumes the maximum (8mb) is used.
|
||||
* If we enable FBC using a CFB on that memory range we'll get FIFO
|
||||
* underruns, even if that range is not reserved by the BIOS. */
|
||||
if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
|
||||
if (IS_BROADWELL(dev_priv) ||
|
||||
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
|
||||
end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
|
||||
else
|
||||
end = dev_priv->gtt.stolen_usable_size;
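
On the affected platforms the compressed framebuffer must end at least 8 MiB below the top of stolen memory, because the display engine assumes the maximum reserved range regardless of what the BIOS actually set aside. A standalone sketch of that clamp with illustrative numbers (the 64 MiB stolen size is an assumed example, not a hardware value):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static size_t cfb_end(size_t stolen_size, size_t stolen_usable_size,
                      bool hw_assumes_8m_reserved)
{
    if (hw_assumes_8m_reserved)
        return stolen_size - 8 * 1024 * 1024;
    return stolen_usable_size;
}

int main(void)
{
    size_t stolen = 64 * 1024 * 1024;   /* assumed example size */

    /* BDW/SKL/KBL case: only the bottom 56 MiB may hold the CFB. */
    printf("end = %zu\n", cfb_end(stolen, stolen, true));
    return 0;
}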

@ -734,6 +707,7 @@ static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc)
if (INTEL_INFO(dev_priv)->gen >= 7)
lines = min(lines, 2048);

/* Hardware needs the full buffer stride, not just the active area. */
return lines * fb->pitches[0];
}
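
As a worked example of the calculation above: with an assumed 1920x1080 framebuffer at 4 bytes per pixel, the worst-case CFB allocation is the full stride times the (possibly clamped) line count:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t lines = 1080;      /* assumed example mode height */
    uint32_t stride = 1920 * 4; /* fb->pitches[0], linear 32bpp */

    if (lines > 2048)           /* gen >= 7 hardware limit */
        lines = 2048;

    printf("cfb size = %u bytes\n", lines * stride); /* 8294400 */
    return 0;
}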

@ -832,84 +806,62 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
* __intel_fbc_update - enable/disable FBC as needed, unlocked
* @dev_priv: i915 device instance
*
* Set up the framebuffer compression hardware at mode set time. We
* enable it if possible:
* - plane A only (on pre-965)
* - no pixel mulitply/line duplication
* - no alpha buffer discard
* - no dual wide
* - framebuffer <= max_hdisplay in width, max_vdisplay in height
*
* We can't assume that any compression will take place (worst case),
* so the compressed buffer has to be the same size as the uncompressed
* one. It also must reside (along with the line length buffer) in
* stolen memory.
*
* We need to enable/disable FBC on a global basis.
* This function completely reevaluates the status of FBC, then enables,
* disables or maintains it on the same state.
*/
static void __intel_fbc_update(struct drm_i915_private *dev_priv)
{
struct drm_crtc *crtc = NULL;
struct intel_crtc *intel_crtc;
struct drm_crtc *drm_crtc = NULL;
struct intel_crtc *crtc;
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
const struct drm_display_mode *adjusted_mode;

WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

/* disable framebuffer compression in vGPU */
if (intel_vgpu_active(dev_priv->dev))
i915.enable_fbc = 0;

if (i915.enable_fbc < 0) {
set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT);
set_no_fbc_reason(dev_priv, "disabled per chip default");
goto out_disable;
}

if (!i915.enable_fbc) {
set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM);
set_no_fbc_reason(dev_priv, "disabled per module param");
goto out_disable;
}

/*
* If FBC is already on, we just have to verify that we can
* keep it that way...
* Need to disable if:
* - more than one pipe is active
* - changing FBC params (stride, fence, mode)
* - new fb is too large to fit in compressed buffer
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
crtc = intel_fbc_find_crtc(dev_priv);
if (!crtc) {
set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT);
drm_crtc = intel_fbc_find_crtc(dev_priv);
if (!drm_crtc) {
set_no_fbc_reason(dev_priv, "no output");
goto out_disable;
}

if (!multiple_pipes_ok(dev_priv)) {
set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES);
set_no_fbc_reason(dev_priv, "more than one pipe active");
goto out_disable;
}

intel_crtc = to_intel_crtc(crtc);
fb = crtc->primary->fb;
crtc = to_intel_crtc(drm_crtc);
fb = crtc->base.primary->fb;
obj = intel_fb_obj(fb);
adjusted_mode = &intel_crtc->config->base.adjusted_mode;
adjusted_mode = &crtc->config->base.adjusted_mode;

if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE);
set_no_fbc_reason(dev_priv, "incompatible mode");
goto out_disable;
}

if (!intel_fbc_hw_tracking_covers_screen(intel_crtc)) {
set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE);
if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
set_no_fbc_reason(dev_priv, "mode too large for compression");
goto out_disable;
}

if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
intel_crtc->plane != PLANE_A) {
set_no_fbc_reason(dev_priv, FBC_BAD_PLANE);
crtc->plane != PLANE_A) {
set_no_fbc_reason(dev_priv, "FBC unsupported on plane");
goto out_disable;
}

@ -918,41 +870,35 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
*/
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
set_no_fbc_reason(dev_priv, FBC_NOT_TILED);
set_no_fbc_reason(dev_priv, "framebuffer not tiled or fenced");
goto out_disable;
}
if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
set_no_fbc_reason(dev_priv, FBC_ROTATION);
crtc->base.primary->state->rotation != BIT(DRM_ROTATE_0)) {
set_no_fbc_reason(dev_priv, "rotation unsupported");
goto out_disable;
}

if (!stride_is_valid(dev_priv, fb->pitches[0])) {
set_no_fbc_reason(dev_priv, FBC_BAD_STRIDE);
set_no_fbc_reason(dev_priv, "framebuffer stride not supported");
goto out_disable;
}

if (!pixel_format_is_valid(fb)) {
set_no_fbc_reason(dev_priv, FBC_PIXEL_FORMAT);
goto out_disable;
}

/* If the kernel debugger is active, always disable compression */
if (in_dbg_master()) {
set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
set_no_fbc_reason(dev_priv, "pixel format is invalid");
goto out_disable;
}

/* WaFbcExceedCdClockThreshold:hsw,bdw */
if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
ilk_pipe_pixel_rate(intel_crtc->config) >=
ilk_pipe_pixel_rate(crtc->config) >=
dev_priv->cdclk_freq * 95 / 100) {
set_no_fbc_reason(dev_priv, FBC_PIXEL_RATE);
set_no_fbc_reason(dev_priv, "pixel rate is too big");
goto out_disable;
}

if (intel_fbc_setup_cfb(intel_crtc)) {
set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
if (intel_fbc_setup_cfb(crtc)) {
set_no_fbc_reason(dev_priv, "not enough stolen memory");
goto out_disable;
}

@ -961,9 +907,9 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
if (dev_priv->fbc.crtc == intel_crtc &&
if (dev_priv->fbc.crtc == crtc &&
dev_priv->fbc.fb_id == fb->base.id &&
dev_priv->fbc.y == crtc->y)
dev_priv->fbc.y == crtc->base.y)
return;

if (intel_fbc_enabled(dev_priv)) {

@ -994,8 +940,8 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
__intel_fbc_disable(dev_priv);
}

intel_fbc_schedule_enable(intel_crtc);
dev_priv->fbc.no_fbc_reason = FBC_OK;
intel_fbc_schedule_enable(crtc);
dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)";
return;

out_disable:

@ -1085,10 +1031,10 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
enum pipe pipe;

mutex_init(&dev_priv->fbc.lock);
dev_priv->fbc.enabled = false;

if (!HAS_FBC(dev_priv)) {
dev_priv->fbc.enabled = false;
dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
dev_priv->fbc.no_fbc_reason = "unsupported by this chipset";
return;
}

@ -1096,7 +1042,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
dev_priv->fbc.possible_framebuffer_bits |=
INTEL_FRONTBUFFER_PRIMARY(pipe);

if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
if (fbc_on_pipe_a_only(dev_priv))
break;
}

@ -1121,5 +1067,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
}

dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv);
/* We still don't have any sort of hardware state readout for FBC, so
* disable it in case the BIOS enabled it to make sure software matches
* the hardware state. */
if (dev_priv->fbc.fbc_enabled(dev_priv))
dev_priv->fbc.disable_fbc(dev_priv);
}
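
Since FBC has no hardware state readout, init makes software and hardware agree by forcing both to the disabled state. A compact standalone model of that sanitize step (the struct and callbacks are hypothetical stand-ins for the dev_priv->fbc vfuncs):

#include <stdbool.h>

struct fbc_model {
    bool enabled;                       /* software bookkeeping */
    bool (*hw_enabled)(void *priv);     /* reads the hardware */
    void (*hw_disable)(void *priv);
};

static void fbc_sanitize_at_init(struct fbc_model *fbc, void *priv)
{
    fbc->enabled = false;               /* software says "off"... */
    if (fbc->hw_enabled(priv))          /* ...so if the BIOS left it on, */
        fbc->hw_disable(priv);          /* turn the hardware off too */
}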

@ -119,7 +119,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
{
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
struct drm_framebuffer *fb;
struct drm_framebuffer *fb = NULL;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = {};

@ -138,6 +138,8 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);

mutex_lock(&dev->struct_mutex);

size = mode_cmd.pitches[0] * mode_cmd.height;
size = PAGE_ALIGN(size);

@ -156,26 +158,28 @@ static int intelfb_alloc(struct drm_fb_helper *helper,

fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb)) {
drm_gem_object_unreference(&obj->base);
ret = PTR_ERR(fb);
goto out_unref;
goto out;
}

/* Flush everything out, we'll be doing GTT only from now on */
ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL, NULL);
ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL);
if (ret) {
DRM_ERROR("failed to pin obj: %d\n", ret);
goto out_fb;
goto out;
}

mutex_unlock(&dev->struct_mutex);

ifbdev->fb = to_intel_framebuffer(fb);

return 0;

out_fb:
drm_framebuffer_remove(fb);
out_unref:
drm_gem_object_unreference(&obj->base);
out:
mutex_unlock(&dev->struct_mutex);
if (!IS_ERR_OR_NULL(fb))
drm_framebuffer_unreference(fb);
return ret;
}
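
The reworked error handling above replaces per-step labels with a single exit point: fb starts out NULL, may become an ERR_PTR-encoded error or a live object, and one IS_ERR_OR_NULL() test decides whether a reference must be dropped. A self-contained model of the idiom (the alloc/pin/put helpers are invented for illustration):

#include <stdbool.h>
#include <stddef.h>

#define MAX_ERRNO 4095
#define IS_ERR(p) ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define IS_ERR_OR_NULL(p) (!(p) || IS_ERR(p))
#define PTR_ERR(p) ((long)(p))

extern void *create_fb(void);   /* may return an ERR_PTR-style error */
extern int pin_fb(void *fb);
extern void put_fb(void *fb);   /* drops one reference */

static int alloc_and_pin(void)
{
    void *fb = NULL;
    int ret;

    fb = create_fb();
    if (IS_ERR(fb)) {
        ret = PTR_ERR(fb);
        goto out;
    }

    ret = pin_fb(fb);
    if (ret)
        goto out;

    return 0;   /* success keeps the reference */

out:
    if (!IS_ERR_OR_NULL(fb))    /* only a live object needs a put */
        put_fb(fb);
    return ret;
}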

@ -193,8 +197,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
int size, ret;
bool prealloc = false;

mutex_lock(&dev->struct_mutex);

if (intel_fb &&
(sizes->fb_width > intel_fb->base.width ||
sizes->fb_height > intel_fb->base.height)) {

@ -209,7 +211,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
if (ret)
goto out_unlock;
return ret;
intel_fb = ifbdev->fb;
} else {
DRM_DEBUG_KMS("re-using BIOS fb\n");

@ -221,8 +223,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
obj = intel_fb->obj;
size = obj->base.size;

mutex_lock(&dev->struct_mutex);

info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
DRM_ERROR("Failed to allocate fb_info\n");
ret = PTR_ERR(info);
goto out_unpin;
}

@ -249,6 +254,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
size);
if (!info->screen_base) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
ret = -ENOSPC;
goto out_destroy_fbi;
}

@ -281,8 +287,6 @@ out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_unpin:
i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}

@ -526,8 +530,10 @@ static void intel_fbdev_destroy(struct drm_device *dev,

drm_fb_helper_fini(&ifbdev->helper);

drm_framebuffer_unregister_private(&ifbdev->fb->base);
drm_framebuffer_remove(&ifbdev->fb->base);
if (ifbdev->fb) {
drm_framebuffer_unregister_private(&ifbdev->fb->base);
drm_framebuffer_remove(&ifbdev->fb->base);
}
}

/*

@ -702,13 +708,20 @@ int intel_fbdev_init(struct drm_device *dev)
return 0;
}

void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
{
struct drm_i915_private *dev_priv = data;
struct intel_fbdev *ifbdev = dev_priv->fbdev;

/* Due to peculiar init order wrt to hpd handling this is separate. */
drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp);
if (drm_fb_helper_initial_config(&ifbdev->helper,
ifbdev->preferred_bpp))
intel_fbdev_fini(dev_priv->dev);
}

void intel_fbdev_initial_config_async(struct drm_device *dev)
{
async_schedule(intel_fbdev_initial_config, to_i915(dev));
}

void intel_fbdev_fini(struct drm_device *dev)

@ -719,7 +732,8 @@ void intel_fbdev_fini(struct drm_device *dev)

flush_work(&dev_priv->fbdev_suspend_work);

async_synchronize_full();
if (!current_is_async())
async_synchronize_full();
intel_fbdev_destroy(dev, dev_priv->fbdev);
kfree(dev_priv->fbdev);
dev_priv->fbdev = NULL;
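
The guard above exists because intel_fbdev_fini() can now be reached from inside the async initial-config callback itself (on failure), and waiting for all async work from within async context would be a self-deadlock. A standalone model of that shape, with a plain flag standing in for current_is_async() and the kernel's async machinery:

#include <stdbool.h>
#include <stdio.h>

static bool in_async_callback;  /* stand-in for current_is_async() */

static void wait_for_async(void)
{
    /* would never return if called from the async callback itself */
    printf("synchronizing async work\n");
}

static void fbdev_fini(void)
{
    if (!in_async_callback)
        wait_for_async();
    printf("destroying fbdev\n");
}

static void initial_config_callback(bool failed)
{
    in_async_callback = true;
    if (failed)
        fbdev_fini();   /* failure path: skip the self-wait */
    in_async_callback = false;
}

int main(void)
{
    initial_config_callback(true);  /* init failed inside the callback */
    fbdev_fini();                   /* normal teardown path */
    return 0;
}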

@ -84,38 +84,21 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
return true;
}

/**
* i9xx_check_fifo_underruns - check for fifo underruns
* @dev_priv: i915 device instance
*
* This function checks for fifo underruns on GMCH platforms. This needs to be
* done manually on modeset to make sure that we catch all underruns since they
* do not generate an interrupt by themselves on these platforms.
*/
void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv)
static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
{
struct intel_crtc *crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
i915_reg_t reg = PIPESTAT(crtc->pipe);
u32 pipestat = I915_READ(reg) & 0xffff0000;

spin_lock_irq(&dev_priv->irq_lock);
assert_spin_locked(&dev_priv->irq_lock);

for_each_intel_crtc(dev_priv->dev, crtc) {
u32 reg = PIPESTAT(crtc->pipe);
u32 pipestat;
if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
return;

if (crtc->cpu_fifo_underrun_disabled)
continue;
I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
POSTING_READ(reg);

pipestat = I915_READ(reg) & 0xffff0000;
if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
continue;

I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
POSTING_READ(reg);

DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
}

spin_unlock_irq(&dev_priv->irq_lock);
DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
}

static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,

@ -123,7 +106,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
bool enable, bool old)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg = PIPESTAT(pipe);
i915_reg_t reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0xffff0000;

assert_spin_locked(&dev_priv->irq_lock);

@ -150,6 +133,23 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
uint32_t err_int = I915_READ(GEN7_ERR_INT);

assert_spin_locked(&dev_priv->irq_lock);

if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
return;

I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
POSTING_READ(GEN7_ERR_INT);

DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe,
bool enable, bool old)

@ -202,6 +202,24 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder pch_transcoder = (enum transcoder) crtc->pipe;
uint32_t serr_int = I915_READ(SERR_INT);

assert_spin_locked(&dev_priv->irq_lock);

if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
return;

I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
POSTING_READ(SERR_INT);

DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
transcoder_name(pch_transcoder));
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable, bool old)

@ -375,3 +393,56 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
DRM_ERROR("PCH transcoder %c FIFO underrun\n",
transcoder_name(pch_transcoder));
}

/**
* intel_check_cpu_fifo_underruns - check for CPU fifo underruns immediately
* @dev_priv: i915 device instance
*
* Check for CPU fifo underruns immediately. Useful on IVB/HSW where the shared
* error interrupt may have been disabled, and so CPU fifo underruns won't
* necessarily raise an interrupt, and on GMCH platforms where underruns never
* raise an interrupt.
*/
void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;

spin_lock_irq(&dev_priv->irq_lock);

for_each_intel_crtc(dev_priv->dev, crtc) {
if (crtc->cpu_fifo_underrun_disabled)
continue;

if (HAS_GMCH_DISPLAY(dev_priv))
i9xx_check_fifo_underruns(crtc);
else if (IS_GEN7(dev_priv))
ivybridge_check_fifo_underruns(crtc);
}

spin_unlock_irq(&dev_priv->irq_lock);
}

/**
* intel_check_pch_fifo_underruns - check for PCH fifo underruns immediately
* @dev_priv: i915 device instance
*
* Check for PCH fifo underruns immediately. Useful on CPT/PPT where the shared
* error interrupt may have been disabled, and so PCH fifo underruns won't
* necessarily raise an interrupt.
*/
void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;

spin_lock_irq(&dev_priv->irq_lock);

for_each_intel_crtc(dev_priv->dev, crtc) {
if (crtc->pch_fifo_underrun_disabled)
continue;

if (HAS_PCH_CPT(dev_priv))
cpt_check_pch_fifo_underruns(crtc);
}

spin_unlock_irq(&dev_priv->irq_lock);
}

@ -76,11 +76,17 @@ struct intel_guc_fw {
uint16_t guc_fw_minor_wanted;
uint16_t guc_fw_major_found;
uint16_t guc_fw_minor_found;

uint32_t header_size;
uint32_t header_offset;
uint32_t rsa_size;
uint32_t rsa_offset;
uint32_t ucode_size;
uint32_t ucode_offset;
};

struct intel_guc {
struct intel_guc_fw guc_fw;

uint32_t log_flags;
struct drm_i915_gem_object *log_obj;

@ -122,6 +122,78 @@

#define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1)

/**
* DOC: GuC Firmware Layout
*
* The GuC firmware layout looks like this:
*
* +-------------------------------+
* |   guc_css_header              |
* | contains major/minor version  |
* +-------------------------------+
* |             uCode             |
* +-------------------------------+
* |         RSA signature         |
* +-------------------------------+
* |          modulus key          |
* +-------------------------------+
* |          exponent val         |
* +-------------------------------+
*
* The firmware may or may not have modulus key and exponent data. The header,
* uCode and RSA signature are must-have components that will be used by driver.
* Length of each components, which is all in dwords, can be found in header.
* In the case that modulus and exponent are not present in fw, a.k.a truncated
* image, the length value still appears in header.
*
* Driver will do some basic fw size validation based on the following rules:
*
* 1. Header, uCode and RSA are must-have components.
* 2. All firmware components, if they present, are in the sequence illustrated
* in the layout table above.
* 3. Length info of each component can be found in header, in dwords.
* 4. Modulus and exponent key are not required by driver. They may not appear
* in fw. So driver will load a truncated firmware in this case.
*/

struct guc_css_header {
uint32_t module_type;
/* header_size includes all non-uCode bits, including css_header, rsa
* key, modulus key and exponent data. */
uint32_t header_size_dw;
uint32_t header_version;
uint32_t module_id;
uint32_t module_vendor;
union {
struct {
uint8_t day;
uint8_t month;
uint16_t year;
};
uint32_t date;
};
uint32_t size_dw; /* uCode plus header_size_dw */
uint32_t key_size_dw;
uint32_t modulus_size_dw;
uint32_t exponent_size_dw;
union {
struct {
uint8_t hour;
uint8_t min;
uint16_t sec;
};
uint32_t time;
};

char username[8];
char buildnumber[12];
uint32_t device_id;
uint32_t guc_sw_version;
uint32_t prod_preprod_fw;
uint32_t reserved[12];
uint32_t header_info;
} __packed;
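
Taken together, the header fields above let the loader derive every component's offset and size. A condensed standalone sketch of the size checks that the rules in the DOC block imply (the scratch-register count and WOPCM budget are passed in as illustrative parameters, not hardware constants):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct css_sizes {  /* the size fields of guc_css_header, in dwords */
    uint32_t header_size_dw, size_dw;
    uint32_t key_size_dw, modulus_size_dw, exponent_size_dw;
};

static bool guc_fw_layout_ok(const struct css_sizes *css, size_t fw_size,
                             uint32_t rsa_scratch_count, size_t wopcm_avail)
{
    size_t header = (css->header_size_dw - css->modulus_size_dw -
                     css->key_size_dw - css->exponent_size_dw) * 4;
    size_t ucode = (css->size_dw - css->header_size_dw) * 4;
    size_t rsa = css->key_size_dw * 4;

    if (css->key_size_dw != rsa_scratch_count)
        return false;   /* RSA key has the wrong size */
    if (fw_size < header + ucode + rsa)
        return false;   /* a must-have component is missing */
    if (header + ucode > wopcm_avail)
        return false;   /* DMA payload won't fit in WOPCM */
    return true;        /* truncated images still pass */
}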

struct guc_doorbell_info {
u32 db_status;
u32 cookie;

@ -31,7 +31,7 @@
#include "intel_guc.h"

/**
* DOC: GuC
* DOC: GuC-specific firmware loader
*
* intel_guc:
* Top level structure of guc. It handles firmware loading and manages client

@ -208,16 +208,6 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
/*
* Transfer the firmware image to RAM for execution by the microcontroller.
*
* GuC Firmware layout:
* +-------------------------------+  ----
* |          CSS header           |  128B
* | contains major/minor version  |
* +-------------------------------+  ----
* |             uCode             |
* +-------------------------------+  ----
* |         RSA signature         |  256B
* +-------------------------------+  ----
*
* Architecturally, the DMA engine is bidirectional, and can potentially even
* transfer between GTT locations. This functionality is left out of the API
* for now as there is no need for it.

@ -225,33 +215,29 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
* Note that GuC needs the CSS header plus uKernel code to be copied by the
* DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
*/

#define UOS_CSS_HEADER_OFFSET 0
#define UOS_VER_MINOR_OFFSET 0x44
#define UOS_VER_MAJOR_OFFSET 0x46
#define UOS_CSS_HEADER_SIZE 0x80
#define UOS_RSA_SIG_SIZE 0x100

static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
unsigned long offset;
struct sg_table *sg = fw_obj->pages;
u32 status, ucode_size, rsa[UOS_RSA_SIG_SIZE / sizeof(u32)];
u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
int i, ret = 0;

/* uCode size, also is where RSA signature starts */
offset = ucode_size = guc_fw->guc_fw_size - UOS_RSA_SIG_SIZE;
I915_WRITE(DMA_COPY_SIZE, ucode_size);
/* where RSA signature starts */
offset = guc_fw->rsa_offset;

/* Copy RSA signature from the fw image to HW for verification */
sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, UOS_RSA_SIG_SIZE, offset);
for (i = 0; i < UOS_RSA_SIG_SIZE / sizeof(u32); i++)
sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), offset);
for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);

/* The header plus uCode will be copied to WOPCM via DMA, excluding any
* other components */
I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);

/* Set the source address for the new blob */
offset = i915_gem_obj_ggtt_offset(fw_obj);
offset = i915_gem_obj_ggtt_offset(fw_obj) + guc_fw->header_offset;
I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);

@ -322,8 +308,8 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);

/* WaDisableMinuteIaClockGating:skl,bxt */
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) {
if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
~GUC_ENABLE_MIA_CLOCK_GATING));
}

@ -378,6 +364,9 @@ int intel_guc_ucode_load(struct drm_device *dev)
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
int err = 0;

if (!i915.enable_guc_submission)
return 0;

DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));

@ -457,10 +446,8 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
{
struct drm_i915_gem_object *obj;
const struct firmware *fw;
const u8 *css_header;
const size_t minsize = UOS_CSS_HEADER_SIZE + UOS_RSA_SIG_SIZE;
const size_t maxsize = GUC_WOPCM_SIZE_VALUE + UOS_RSA_SIG_SIZE
- 0x8000; /* 32k reserved (8K stack + 24k context) */
struct guc_css_header *css;
size_t size;
int err;

DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",

@ -474,12 +461,52 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)

DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
guc_fw->guc_fw_path, fw);
DRM_DEBUG_DRIVER("firmware file size %zu (minimum %zu, maximum %zu)\n",
fw->size, minsize, maxsize);

/* Check the size of the blob befoe examining buffer contents */
if (fw->size < minsize || fw->size > maxsize)
/* Check the size of the blob before examining buffer contents */
if (fw->size < sizeof(struct guc_css_header)) {
DRM_ERROR("Firmware header is missing\n");
goto fail;
}

css = (struct guc_css_header *)fw->data;

/* Firmware bits always start from header */
guc_fw->header_offset = 0;
guc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
css->key_size_dw - css->exponent_size_dw) * sizeof(u32);

if (guc_fw->header_size != sizeof(struct guc_css_header)) {
DRM_ERROR("CSS header definition mismatch\n");
goto fail;
}

/* then, uCode */
guc_fw->ucode_offset = guc_fw->header_offset + guc_fw->header_size;
guc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);

/* now RSA */
if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
DRM_ERROR("RSA key size is bad\n");
goto fail;
}
guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
guc_fw->rsa_size = css->key_size_dw * sizeof(u32);

/* At least, it should have header, uCode and RSA. Size of all three. */
size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
if (fw->size < size) {
DRM_ERROR("Missing firmware components\n");
goto fail;
}

/* Header and uCode will be loaded to WOPCM. Size of the two. */
size = guc_fw->header_size + guc_fw->ucode_size;

/* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) {
DRM_ERROR("Firmware is too large to fit in WOPCM\n");
goto fail;
}

/*
* The GuC firmware image has the version number embedded at a well-known

@ -487,9 +514,8 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
* TWO bytes each (i.e. u16), although all pointers and offsets are defined
* in terms of bytes (u8).
*/
css_header = fw->data + UOS_CSS_HEADER_OFFSET;
guc_fw->guc_fw_major_found = *(u16 *)(css_header + UOS_VER_MAJOR_OFFSET);
guc_fw->guc_fw_minor_found = *(u16 *)(css_header + UOS_VER_MINOR_OFFSET);
guc_fw->guc_fw_major_found = css->guc_sw_version >> 16;
guc_fw->guc_fw_minor_found = css->guc_sw_version & 0xFFFF;
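
The version now comes from a packed dword rather than raw byte offsets: the major number sits in the high half and the minor in the low half, so a guc_sw_version of 0x00060001 decodes to 6.1. In isolation:

#include <stdint.h>

static void decode_guc_version(uint32_t sw_version,
                               uint16_t *major, uint16_t *minor)
{
    *major = sw_version >> 16;      /* 0x00060001 -> 6 */
    *minor = sw_version & 0xFFFF;   /* 0x00060001 -> 1 */
}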

if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {

@ -566,6 +592,9 @@ void intel_guc_ucode_init(struct drm_device *dev)
fw_path = ""; /* unknown device */
}

if (!i915.enable_guc_submission)
return;

guc_fw->guc_dev = dev;
guc_fw->guc_fw_path = fw_path;
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;

@ -113,10 +113,11 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
}
}

static u32 hsw_dip_data_reg(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder,
enum hdmi_infoframe_type type,
int i)
static i915_reg_t
hsw_dip_data_reg(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder,
enum hdmi_infoframe_type type,
int i)
{
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:

@ -127,7 +128,7 @@ static u32 hsw_dip_data_reg(struct drm_i915_private *dev_priv,
return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i);
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
return INVALID_MMIO_REG;
}
}
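
The conversions in this file all rest on wrapping register offsets in a single-member struct, so that a bare u32 can no longer be passed where a register is expected and offset 0 can serve as the explicit invalid value. A simplified standalone model of the wrapper and its helpers (the real definitions live in i915_reg.h; this is a sketch, not a verbatim copy):

#include <stdbool.h>
#include <stdint.h>

typedef struct {
    uint32_t reg;       /* MMIO offset, hidden behind the type */
} i915_reg_t;

#define _MMIO(r) ((i915_reg_t){ .reg = (r) })
#define INVALID_MMIO_REG _MMIO(0)

static inline uint32_t i915_mmio_reg_offset(i915_reg_t r)
{
    return r.reg;
}

static inline bool i915_mmio_reg_valid(i915_reg_t r)
{
    return r.reg != 0;  /* offset 0 doubles as "no register" */
}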

@ -193,8 +194,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;

WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");

@ -229,7 +231,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);

if ((val & VIDEO_DIP_ENABLE) == 0)

@ -251,8 +253,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;

WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");

@ -289,8 +292,7 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 val = I915_READ(TVIDEO_DIP_CTL(intel_crtc->pipe));

if ((val & VIDEO_DIP_ENABLE) == 0)
return false;

@ -308,8 +310,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;

WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");

@ -344,8 +347,7 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(intel_crtc->pipe));

if ((val & VIDEO_DIP_ENABLE) == 0)
return false;

@ -367,13 +369,13 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
u32 data_reg;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
i915_reg_t data_reg;
int i;
u32 val = I915_READ(ctl_reg);

data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0);
if (data_reg == 0)
if (i915_mmio_reg_valid(data_reg))
return;

val &= ~hsw_infoframe_enable(type);

@ -401,8 +403,7 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 val = I915_READ(ctl_reg);
u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder));

return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |

@ -513,7 +514,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = VIDEO_DIP_CTL;
i915_reg_t reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
u32 port = VIDEO_DIP_PORT(intel_dig_port->port);

@ -633,7 +634,8 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
u32 reg, val = 0;
i915_reg_t reg;
u32 val = 0;

if (HAS_DDI(dev_priv))
reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);

@ -666,7 +668,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 port = VIDEO_DIP_PORT(intel_dig_port->port);

@ -717,7 +719,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);

assert_hdmi_port_disabled(intel_hdmi);

@ -760,7 +762,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 port = VIDEO_DIP_PORT(intel_dig_port->port);

@ -811,7 +813,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 val = I915_READ(reg);

assert_hdmi_port_disabled(intel_hdmi);

@ -1108,6 +1110,13 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
* matching DP port to be enabled on transcoder A.
*/
if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
*/
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

temp &= ~SDVO_PIPE_B_SELECT;
temp |= SDVO_ENABLE;
/*

@ -1122,6 +1131,10 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
temp &= ~SDVO_ENABLE;
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);

intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}

intel_hdmi->set_infoframes(&encoder->base, false, NULL);

@ -1335,21 +1348,18 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_encoder *intel_encoder =
&hdmi_to_dig_port(intel_hdmi)->base;
enum intel_display_power_domain power_domain;
struct edid *edid = NULL;
bool connected = false;

power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
if (force) {
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);

if (force)
edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));

intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
}

to_intel_connector(connector)->detect_edid = edid;
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {

@ -1383,6 +1393,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);

intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);

while (!live_status && --retry) {
live_status = intel_digital_port_connected(dev_priv,
hdmi_to_dig_port(intel_hdmi));

@ -1402,6 +1414,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
} else
status = connector_status_disconnected;

intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);

return status;
}

@ -2039,7 +2053,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
*/
if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
intel_encoder->hpd_pin = HPD_PORT_A;
else
intel_encoder->hpd_pin = HPD_PORT_B;

@ -2131,7 +2145,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
}
}

void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
void intel_hdmi_init(struct drm_device *dev,
i915_reg_t hdmi_reg, enum port port)
{
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;

@ -2202,7 +2217,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)

intel_dig_port->port = port;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
intel_dig_port->dp.output_reg = 0;
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;

intel_hdmi_init_connector(intel_dig_port, intel_connector);
}

@ -36,7 +36,7 @@

struct gmbus_pin {
const char *name;
int reg;
i915_reg_t reg;
};

/* Map gmbus pin pairs to names and registers. */

@ -63,9 +63,9 @@ static const struct gmbus_pin gmbus_pins_skl[] = {
};

static const struct gmbus_pin gmbus_pins_bxt[] = {
[GMBUS_PIN_1_BXT] = { "dpb", PCH_GPIOB },
[GMBUS_PIN_2_BXT] = { "dpc", PCH_GPIOC },
[GMBUS_PIN_3_BXT] = { "misc", PCH_GPIOD },
[GMBUS_PIN_1_BXT] = { "dpb", GPIOB },
[GMBUS_PIN_2_BXT] = { "dpc", GPIOC },
[GMBUS_PIN_3_BXT] = { "misc", GPIOD },
};

/* pin is expected to be valid */

@ -74,7 +74,7 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
{
if (IS_BROXTON(dev_priv))
return &gmbus_pins_bxt[pin];
else if (IS_SKYLAKE(dev_priv))
else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return &gmbus_pins_skl[pin];
else if (IS_BROADWELL(dev_priv))
return &gmbus_pins_bdw[pin];

@ -89,14 +89,15 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,

if (IS_BROXTON(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bxt);
else if (IS_SKYLAKE(dev_priv))
else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
size = ARRAY_SIZE(gmbus_pins_skl);
else if (IS_BROADWELL(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bdw);
else
size = ARRAY_SIZE(gmbus_pins);

return pin < size && get_gmbus_pin(dev_priv, pin)->reg;
return pin < size &&
i915_mmio_reg_valid(get_gmbus_pin(dev_priv, pin)->reg);
}

/* Intel GPIO access functions */

@ -240,9 +241,8 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)

algo = &bus->bit_algo;

bus->gpio_reg = dev_priv->gpio_mmio_base +
get_gmbus_pin(dev_priv, pin)->reg;

bus->gpio_reg = _MMIO(dev_priv->gpio_mmio_base +
i915_mmio_reg_offset(get_gmbus_pin(dev_priv, pin)->reg));
bus->adapter.algo_data = algo;
algo->setsda = set_data;
algo->setscl = set_clock;

@ -483,7 +483,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
int i = 0, inc, try = 0;
int ret = 0;

intel_aux_display_runtime_get(dev_priv);
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
mutex_lock(&dev_priv->gmbus_mutex);

if (bus->force_bit) {

@ -595,7 +595,9 @@ timeout:

out:
mutex_unlock(&dev_priv->gmbus_mutex);
intel_aux_display_runtime_put(dev_priv);

intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);

return ret;
}

@ -626,12 +628,13 @@ int intel_setup_gmbus(struct drm_device *dev)

if (HAS_PCH_NOP(dev))
return 0;
else if (HAS_PCH_SPLIT(dev))
dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
else if (IS_VALLEYVIEW(dev))

if (IS_VALLEYVIEW(dev))
dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
else
dev_priv->gpio_mmio_base = 0;
else if (!HAS_GMCH_DISPLAY(dev_priv))
dev_priv->gpio_mmio_base =
i915_mmio_reg_offset(PCH_GPIOA) -
i915_mmio_reg_offset(GPIOA);

mutex_init(&dev_priv->gmbus_mutex);
init_waitqueue_head(&dev_priv->gmbus_wait_queue);

@ -190,16 +190,21 @@
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)

#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \
#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
(reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
(reg_state)[(pos)+1] = (val); \
} while (0)

#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
}
} while (0)

#define ASSIGN_CTX_PML4(ppgtt, reg_state) { \
#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
}
} while (0)
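
Wrapping the macro bodies in do { ... } while (0) is what makes them behave as a single statement, so the trailing semicolon at the call site parses correctly under an un-braced if/else. A small demonstration of the hazard the conversion removes (the macros here are invented for illustration):

#include <stdio.h>

/* Brace-block style: `if (c) INIT_BAD(x); else ...` fails to compile,
 * because the ';' after the expanded block orphans the else. */
#define INIT_BAD(x) { (x)[0] = 1; (x)[1] = 2; }

/* do-while(0) style: expands to exactly one statement. */
#define INIT_GOOD(x) do { (x)[0] = 1; (x)[1] = 2; } while (0)

int main(void)
{
    int state[2] = { 0, 0 };

    if (state[0] == 0)
        INIT_GOOD(state);   /* fine; swap in INIT_BAD and add an
                             * else branch to see the breakage */
    printf("%d %d\n", state[0], state[1]);
    return 0;
}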

enum {
ADVANCED_CONTEXT = 0,

@ -284,8 +289,8 @@ static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;

return ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) &&
return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
(ring->id == VCS || ring->id == VCS2);
}
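
The same rewrite recurs throughout this file: open-coded platform-plus-stepping comparisons become inclusive range checks. A simplified model of how such a helper can be expressed (the real IS_SKL_REVID/IS_BXT_REVID macros live elsewhere in the driver; this is only a sketch):

#include <stdbool.h>
#include <stdint.h>

struct dev_model {
    bool is_skylake;
    uint8_t revid;      /* PCI revision, e.g. a B0 stepping */
};

static inline bool is_skl_revid(const struct dev_model *d,
                                uint8_t since, uint8_t until)
{
    return d->is_skylake && d->revid >= since && d->revid <= until;
}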
|
||||
|
||||
@ -921,7 +926,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
|
||||
|
||||
intel_logical_ring_emit(ringbuf, MI_NOOP);
|
||||
intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
|
||||
intel_logical_ring_emit(ringbuf, INSTPM);
|
||||
intel_logical_ring_emit_reg(ringbuf, INSTPM);
|
||||
intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
|
||||
intel_logical_ring_advance(ringbuf);
|
||||
|
||||
@ -1096,7 +1101,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
|
||||
|
||||
intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
|
||||
for (i = 0; i < w->count; i++) {
|
||||
intel_logical_ring_emit(ringbuf, w->reg[i].addr);
|
||||
intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
|
||||
intel_logical_ring_emit(ringbuf, w->reg[i].value);
|
||||
}
|
||||
intel_logical_ring_emit(ringbuf, MI_NOOP);
|
||||
@ -1120,6 +1125,8 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
|
||||
batch[__index] = (cmd); \
|
||||
} while (0)
|
||||
|
||||
#define wa_ctx_emit_reg(batch, index, reg) \
|
||||
wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
|
||||
|
||||
/*
|
||||
* In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
|
||||
@ -1149,17 +1156,17 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
|
||||
* this batch updates GEN8_L3SQCREG4 with default value we need to
|
||||
* set this bit here to retain the WA during flush.
|
||||
*/
|
||||
if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
|
||||
if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0))
|
||||
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
|
||||
|
||||
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
|
||||
MI_SRM_LRM_GLOBAL_GTT));
|
||||
wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
|
||||
wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
|
||||
wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
|
||||
wa_ctx_emit(batch, index, 0);
|
||||
|
||||
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
|
||||
wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
|
||||
wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
|
||||
wa_ctx_emit(batch, index, l3sqc4_flush);
|
||||
|
||||
wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
|
||||
@ -1172,7 +1179,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
|
||||
|
||||
wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
|
||||
MI_SRM_LRM_GLOBAL_GTT));
|
||||
wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
|
||||
wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
|
||||
wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
|
||||
wa_ctx_emit(batch, index, 0);
|
||||
|
||||
@ -1314,8 +1321,8 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
|
||||
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
|
||||
|
||||
/* WaDisableCtxRestoreArbitration:skl,bxt */
|
||||
if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
|
||||
(IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
|
||||
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
|
||||
IS_BXT_REVID(dev, 0, BXT_REVID_A1))
|
||||
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
|
||||
|
||||
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
|
||||
@ -1340,18 +1347,18 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
|
||||
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
|
||||
|
||||
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
|
||||
if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_B0)) ||
|
||||
(IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) {
|
||||
if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
|
||||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
|
||||
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
|
||||
wa_ctx_emit(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
|
||||
wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
|
||||
wa_ctx_emit(batch, index,
|
||||
_MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
|
||||
wa_ctx_emit(batch, index, MI_NOOP);
|
||||
}
|
||||
|
||||
/* WaDisableCtxRestoreArbitration:skl,bxt */
|
||||
if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
|
||||
(IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
|
||||
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
|
||||
IS_BXT_REVID(dev, 0, BXT_REVID_A1))
|
||||
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
|
||||
|
||||
wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
|
||||
@@ -1472,12 +1479,6 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
 	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
 	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
 
-	if (ring->status_page.obj) {
-		I915_WRITE(RING_HWS_PGA(ring->mmio_base),
-			   (u32)ring->status_page.gfx_addr);
-		POSTING_READ(RING_HWS_PGA(ring->mmio_base));
-	}
-
 	I915_WRITE(RING_MODE_GEN7(ring),
 		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
 		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
@@ -1562,9 +1563,9 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-		intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+		intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i));
 		intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
-		intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+		intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i));
 		intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
 	}
 
@@ -1923,6 +1924,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
 	i915_gem_batch_pool_init(dev, &ring->batch_pool);
 	init_waitqueue_head(&ring->irq_queue);
 
+	INIT_LIST_HEAD(&ring->buffers);
 	INIT_LIST_HEAD(&ring->execlist_queue);
 	INIT_LIST_HEAD(&ring->execlist_retired_req_list);
 	spin_lock_init(&ring->execlist_lock);
@@ -1972,7 +1974,7 @@ static int logical_render_ring_init(struct drm_device *dev)
 	ring->init_hw = gen8_init_render_ring;
 	ring->init_context = gen8_init_rcs_context;
 	ring->cleanup = intel_fini_pipe_control;
-	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		ring->get_seqno = bxt_a_get_seqno;
 		ring->set_seqno = bxt_a_set_seqno;
 	} else {
@@ -2024,7 +2026,7 @@ static int logical_bsd_ring_init(struct drm_device *dev)
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 
 	ring->init_hw = gen8_init_common_ring;
-	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		ring->get_seqno = bxt_a_get_seqno;
 		ring->set_seqno = bxt_a_set_seqno;
 	} else {
@@ -2079,7 +2081,7 @@ static int logical_blt_ring_init(struct drm_device *dev)
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 
 	ring->init_hw = gen8_init_common_ring;
-	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		ring->get_seqno = bxt_a_get_seqno;
 		ring->set_seqno = bxt_a_set_seqno;
 	} else {
@@ -2109,7 +2111,7 @@ static int logical_vebox_ring_init(struct drm_device *dev)
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
 
 	ring->init_hw = gen8_init_common_ring;
-	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		ring->get_seqno = bxt_a_get_seqno;
 		ring->set_seqno = bxt_a_set_seqno;
 	} else {
@@ -2263,46 +2265,31 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 	 * only for the first context restore: on a subsequent save, the GPU will
 	 * recreate this batchbuffer with new values (including all the missing
 	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
-	if (ring->id == RCS)
-		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
-	else
-		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
-	reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
-	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
-	reg_state[CTX_CONTEXT_CONTROL+1] =
-		_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
-				   CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
-				   CTX_CTRL_RS_CTX_ENABLE);
-	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
-	reg_state[CTX_RING_HEAD+1] = 0;
-	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
-	reg_state[CTX_RING_TAIL+1] = 0;
-	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
+	reg_state[CTX_LRI_HEADER_0] =
+		MI_LOAD_REGISTER_IMM(ring->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
+	ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
+		       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
+					  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
+					  CTX_CTRL_RS_CTX_ENABLE));
+	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
 	/* Ring buffer start address is not known until the buffer is pinned.
 	 * It is written to the context image in execlists_update_context()
 	 */
-	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
-	reg_state[CTX_RING_BUFFER_CONTROL+1] =
-		((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
-	reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
-	reg_state[CTX_BB_HEAD_U+1] = 0;
-	reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
-	reg_state[CTX_BB_HEAD_L+1] = 0;
-	reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
-	reg_state[CTX_BB_STATE+1] = (1<<5);
-	reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
-	reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
-	reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
-	reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
-	reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
-	reg_state[CTX_SECOND_BB_STATE+1] = 0;
+	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, RING_START(ring->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, RING_CTL(ring->mmio_base),
+		       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
+	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, RING_BBADDR_UDW(ring->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, RING_BBADDR(ring->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, RING_BBSTATE(ring->mmio_base),
+		       RING_BB_PPGTT);
+	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(ring->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(ring->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, RING_SBBSTATE(ring->mmio_base), 0);
 	if (ring->id == RCS) {
-		reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
-		reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
-		reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
-		reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
-		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
-		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
+		ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(ring->mmio_base), 0);
+		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(ring->mmio_base), 0);
+		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(ring->mmio_base), 0);
 		if (ring->wa_ctx.obj) {
 			struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
 			uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
@@ -2319,18 +2306,17 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 				0x01;
 		}
 	}
-	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
-	reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
-	reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
-	reg_state[CTX_CTX_TIMESTAMP+1] = 0;
-	reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
-	reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
-	reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
-	reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
-	reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
-	reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
-	reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
-	reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
+	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
+	ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(ring->mmio_base), 0);
+	/* PDP values well be assigned later if needed */
+	ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(ring, 3), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(ring, 3), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(ring, 2), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(ring, 2), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(ring, 1), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(ring, 1), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0);
 
 	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
 		/* 64b PPGTT (48bit canonical)
@@ -2352,8 +2338,8 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 
 	if (ring->id == RCS) {
 		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
-		reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;
-		reg_state[CTX_R_PWR_CLK_STATE+1] = make_rpcs(dev);
+		ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
+			       make_rpcs(dev));
 	}
 
 	kunmap_atomic(reg_state);
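
populate_lr_context() stops writing the register/value pairs of the context image by hand and routes them through ASSIGN_CTX_REG(), which also replaces the magic `mmio_base + 0x...` offsets with named register macros. A minimal sketch of the helper the new code relies on, assuming the definition has roughly this shape:

    /* Each context-image slot holds a register offset followed by the
     * value that the hardware will load into it; the helper keeps the
     * pair in sync and accepts a typed i915_reg_t. */
    #define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
        (reg_state)[(pos) + 0] = i915_mmio_reg_offset(reg); \
        (reg_state)[(pos) + 1] = (val); \
    } while (0)
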
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
@@ -29,16 +29,16 @@
 #define GEN8_CSB_PTR_MASK 0x07
 
 /* Execlists regs */
-#define RING_ELSP(ring)				((ring)->mmio_base+0x230)
-#define RING_EXECLIST_STATUS_LO(ring)		((ring)->mmio_base+0x234)
-#define RING_EXECLIST_STATUS_HI(ring)		((ring)->mmio_base+0x234 + 4)
-#define RING_CONTEXT_CONTROL(ring)		((ring)->mmio_base+0x244)
+#define RING_ELSP(ring)				_MMIO((ring)->mmio_base + 0x230)
+#define RING_EXECLIST_STATUS_LO(ring)		_MMIO((ring)->mmio_base + 0x234)
+#define RING_EXECLIST_STATUS_HI(ring)		_MMIO((ring)->mmio_base + 0x234 + 4)
+#define RING_CONTEXT_CONTROL(ring)		_MMIO((ring)->mmio_base + 0x244)
 #define	  CTX_CTRL_INHIBIT_SYN_CTX_SWITCH	(1 << 3)
 #define	  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)
 #define   CTX_CTRL_RS_CTX_ENABLE		(1 << 1)
-#define RING_CONTEXT_STATUS_BUF_LO(ring, i)	((ring)->mmio_base+0x370 + (i) * 8)
-#define RING_CONTEXT_STATUS_BUF_HI(ring, i)	((ring)->mmio_base+0x370 + (i) * 8 + 4)
-#define RING_CONTEXT_STATUS_PTR(ring)		((ring)->mmio_base+0x3a0)
+#define RING_CONTEXT_STATUS_BUF_LO(ring, i)	_MMIO((ring)->mmio_base + 0x370 + (i) * 8)
+#define RING_CONTEXT_STATUS_BUF_HI(ring, i)	_MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
+#define RING_CONTEXT_STATUS_PTR(ring)		_MMIO((ring)->mmio_base + 0x3a0)
 
 /* Logical Rings */
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
@@ -70,6 +70,11 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
 	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
 	ringbuf->tail += 4;
 }
+static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
+					       i915_reg_t reg)
+{
+	intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
+}
 
 /* Logical Ring Contexts */
 
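
This is the heart of the type-safe register series: `_MMIO()` turns a bare offset into an `i915_reg_t`, a one-member struct that the compiler refuses to mix up with a plain u32 value. A sketch of the pattern, assuming roughly these definitions in i915_reg.h:

    typedef struct {
        u32 reg;    /* raw MMIO offset, only reachable via accessors */
    } i915_reg_t;

    #define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

    static inline u32 i915_mmio_reg_offset(i915_reg_t reg)
    {
        return reg.reg;
    }

    static inline bool i915_mmio_reg_equal(i915_reg_t a, i915_reg_t b)
    {
        return i915_mmio_reg_offset(a) == i915_mmio_reg_offset(b);
    }

Because the struct is passed by value and the accessor is inline, the wrapper compiles away entirely; the cost is paid only at type-check time.
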
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
@@ -51,7 +51,7 @@ struct intel_lvds_encoder {
 	struct intel_encoder base;
 
 	bool is_dual_link;
-	u32 reg;
+	i915_reg_t reg;
 	u32 a3_power;
 
 	struct intel_lvds_connector *attached_connector;
@@ -210,7 +210,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
 	struct intel_connector *intel_connector =
 		&lvds_encoder->attached_connector->base;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 ctl_reg, stat_reg;
+	i915_reg_t ctl_reg, stat_reg;
 
 	if (HAS_PCH_SPLIT(dev)) {
 		ctl_reg = PCH_PP_CONTROL;
@@ -235,7 +235,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 	struct drm_device *dev = encoder->base.dev;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 ctl_reg, stat_reg;
+	i915_reg_t ctl_reg, stat_reg;
 
 	if (HAS_PCH_SPLIT(dev)) {
 		ctl_reg = PCH_PP_CONTROL;
@@ -939,7 +939,7 @@ void intel_lvds_init(struct drm_device *dev)
 	struct drm_display_mode *downclock_mode = NULL;
 	struct edid *edid;
 	struct drm_crtc *crtc;
-	u32 lvds_reg;
+	i915_reg_t lvds_reg;
 	u32 lvds;
 	int pipe;
 	u8 pin;
@@ -1164,8 +1164,7 @@ out:
 	DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
 		      lvds_encoder->is_dual_link ? "dual" : "single");
 
-	lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
-				 LVDS_A3_POWER_MASK;
+	lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
 
 	lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
 	if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
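
The lvds locals switch to i915_reg_t because the MMIO accessors themselves now take the typed handle. A simplified sketch of the accessor shape this implies (the real ones add tracing, forcewake and range checking; the bodies below are assumptions for illustration):

    static inline u32 i915_read32(struct drm_i915_private *dev_priv,
                                  i915_reg_t reg)
    {
        return readl(dev_priv->regs + i915_mmio_reg_offset(reg));
    }

    static inline void i915_write32(struct drm_i915_private *dev_priv,
                                    i915_reg_t reg, u32 val)
    {
        writel(val, dev_priv->regs + i915_mmio_reg_offset(reg));
    }

    #define I915_READ(reg)       i915_read32(dev_priv, (reg))
    #define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val))
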
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
@@ -143,7 +143,7 @@ static bool get_mocs_settings(struct drm_device *dev,
 {
 	bool result = false;
 
-	if (IS_SKYLAKE(dev)) {
+	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
 		table->size = ARRAY_SIZE(skylake_mocs_table);
 		table->table = skylake_mocs_table;
 		result = true;
@@ -159,11 +159,30 @@ static bool get_mocs_settings(struct drm_device *dev,
 	return result;
 }
 
+static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
+{
+	switch (ring) {
+	case RCS:
+		return GEN9_GFX_MOCS(index);
+	case VCS:
+		return GEN9_MFX0_MOCS(index);
+	case BCS:
+		return GEN9_BLT_MOCS(index);
+	case VECS:
+		return GEN9_VEBOX_MOCS(index);
+	case VCS2:
+		return GEN9_MFX1_MOCS(index);
+	default:
+		MISSING_CASE(ring);
+		return INVALID_MMIO_REG;
+	}
+}
+
 /**
  * emit_mocs_control_table() - emit the mocs control table
  * @req:	Request to set up the MOCS table for.
  * @table:	The values to program into the control regs.
- * @reg_base:	The base for the engine that needs to be programmed.
+ * @ring:	The engine for whom to emit the registers.
  *
  * This function simply emits a MI_LOAD_REGISTER_IMM command for the
  * given table starting at the given address.
@@ -172,7 +191,7 @@ static bool get_mocs_settings(struct drm_device *dev,
  */
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 				   const struct drm_i915_mocs_table *table,
-				   u32 reg_base)
+				   enum intel_ring_id ring)
 {
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	unsigned int index;
@@ -191,7 +210,7 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 				MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
 
 	for (index = 0; index < table->size; index++) {
-		intel_logical_ring_emit(ringbuf, reg_base + index * 4);
+		intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
 		intel_logical_ring_emit(ringbuf,
 					table->table[index].control_value);
 	}
@@ -205,7 +224,7 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	 * that value to all the used entries.
 	 */
 	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-		intel_logical_ring_emit(ringbuf, reg_base + index * 4);
+		intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
 		intel_logical_ring_emit(ringbuf, table->table[0].control_value);
 	}
 
@@ -253,7 +272,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 		value = (table->table[count].l3cc_value & 0xffff) |
 			((table->table[count + 1].l3cc_value & 0xffff) << 16);
 
-		intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+		intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
 		intel_logical_ring_emit(ringbuf, value);
 	}
 
@@ -270,7 +289,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	 * they are reserved by the hardware.
 	 */
 	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-		intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+		intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
 		intel_logical_ring_emit(ringbuf, value);
 
 		value = filler;
@@ -304,26 +323,16 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
 	int ret;
 
 	if (get_mocs_settings(req->ring->dev, &t)) {
+		struct drm_i915_private *dev_priv = req->i915;
+		struct intel_engine_cs *ring;
+		enum intel_ring_id ring_id;
+
 		/* Program the control registers */
-		ret = emit_mocs_control_table(req, &t, GEN9_GFX_MOCS_0);
-		if (ret)
-			return ret;
-
-		ret = emit_mocs_control_table(req, &t, GEN9_MFX0_MOCS_0);
-		if (ret)
-			return ret;
-
-		ret = emit_mocs_control_table(req, &t, GEN9_MFX1_MOCS_0);
-		if (ret)
-			return ret;
-
-		ret = emit_mocs_control_table(req, &t, GEN9_VEBOX_MOCS_0);
-		if (ret)
-			return ret;
-
-		ret = emit_mocs_control_table(req, &t, GEN9_BLT_MOCS_0);
-		if (ret)
-			return ret;
+		for_each_ring(ring, dev_priv, ring_id) {
+			ret = emit_mocs_control_table(req, &t, ring_id);
+			if (ret)
+				return ret;
+		}
 
 		/* Now program the l3cc registers */
 		ret = emit_mocs_l3cc_table(req, &t);
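
Taken together, these changes make the MOCS setup self-describing: the caller passes a ring id, mocs_register() picks that engine's register bank, and the typed emit helper keeps the offsets honest. Condensed from the hunks above, the emission loop for one engine becomes:

    /* One MI_LOAD_REGISTER_IMM covers all MOCS control entries;
     * entries beyond the table repeat entry 0. */
    intel_logical_ring_emit(ringbuf,
                            MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
    for (index = 0; index < table->size; index++) {
        intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
        intel_logical_ring_emit(ringbuf, table->table[index].control_value);
    }
    for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
        intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
        intel_logical_ring_emit(ringbuf, table->table[0].control_value);
    }
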
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
@@ -682,7 +682,7 @@ static void intel_didl_outputs(struct drm_device *dev)
 	}
 
 	if (!acpi_video_bus) {
-		DRM_ERROR("No ACPI video bus found\n");
+		DRM_DEBUG_KMS("No ACPI video bus found\n");
 		return;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
@@ -749,7 +749,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL, NULL,
+	ret = i915_gem_object_pin_to_display_plane(new_bo, 0,
 						   &i915_ggtt_view_normal);
 	if (ret != 0)
 		return ret;
 
File diff suppressed because it is too large
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
@@ -80,7 +80,7 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
-	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
+	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
 	uint32_t *data = (uint32_t *) vsc_psr;
 	unsigned int i;
 
@@ -151,13 +151,31 @@ static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
 			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
 }
 
+static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
+				  enum port port)
+{
+	if (INTEL_INFO(dev_priv)->gen >= 9)
+		return DP_AUX_CH_CTL(port);
+	else
+		return EDP_PSR_AUX_CTL;
+}
+
+static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
+				   enum port port, int index)
+{
+	if (INTEL_INFO(dev_priv)->gen >= 9)
+		return DP_AUX_CH_DATA(port, index);
+	else
+		return EDP_PSR_AUX_DATA(index);
+}
+
 static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t aux_clock_divider;
-	uint32_t aux_data_reg, aux_ctl_reg;
+	i915_reg_t aux_ctl_reg;
 	int precharge = 0x3;
 	static const uint8_t aux_msg[] = {
 		[0] = DP_AUX_NATIVE_WRITE << 4,
@@ -166,6 +184,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 		[3] = 1 - 1,
 		[4] = DP_SET_POWER_D0,
 	};
+	enum port port = dig_port->port;
 	int i;
 
 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
@@ -181,14 +200,11 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 			   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
 			   DP_AUX_FRAME_SYNC_ENABLE);
 
-	aux_data_reg = (INTEL_INFO(dev)->gen >= 9) ?
-				DPA_AUX_CH_DATA1 : EDP_PSR_AUX_DATA1(dev);
-	aux_ctl_reg = (INTEL_INFO(dev)->gen >= 9) ?
-				DPA_AUX_CH_CTL : EDP_PSR_AUX_CTL(dev);
+	aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
 
 	/* Setup AUX registers */
 	for (i = 0; i < sizeof(aux_msg); i += 4)
-		I915_WRITE(aux_data_reg + i,
+		I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
 			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
 
 	if (INTEL_INFO(dev)->gen >= 9) {
@@ -267,16 +283,11 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
 	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
 	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
-		/* It doesn't mean we shouldn't send TPS patters, so let's
-		   send the minimal TP1 possible and skip TP2. */
-		val |= EDP_PSR_TP1_TIME_100us;
-		val |= EDP_PSR_TP2_TP3_TIME_0us;
-		val |= EDP_PSR_SKIP_AUX_EXIT;
 		/* Sink should be able to train with the 5 or 6 idle patterns */
 		idle_frames += 4;
 	}
 
-	I915_WRITE(EDP_PSR_CTL(dev), val |
+	I915_WRITE(EDP_PSR_CTL, val |
 		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
 		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
 		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
@@ -340,7 +351,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+	WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
 	WARN_ON(dev_priv->psr.active);
 	lockdep_assert_held(&dev_priv->psr.lock);
 
@@ -404,7 +415,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
 	}
 
 	/* Avoid continuous PSR exit by masking memup and hpd */
-	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
+	I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
 		   EDP_PSR_DEBUG_MASK_HPD);
 
 	/* Enable PSR on the panel */
@@ -427,6 +438,19 @@ void intel_psr_enable(struct intel_dp *intel_dp)
 		vlv_psr_enable_source(intel_dp);
 	}
 
+	/*
+	 * FIXME: Activation should happen immediately since this function
+	 * is just called after pipe is fully trained and enabled.
+	 * However on every platform we face issues when first activation
+	 * follows a modeset so quickly.
+	 *     - On VLV/CHV we get bank screen on first activation
+	 *     - On HSW/BDW we get a recoverable frozen screen until next
+	 *       exit-activate sequence.
+	 */
+	if (INTEL_INFO(dev)->gen < 9)
+		schedule_delayed_work(&dev_priv->psr.work,
+			msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
+
 	dev_priv->psr.enabled = intel_dp;
 unlock:
 	mutex_unlock(&dev_priv->psr.lock);
@@ -466,17 +490,17 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (dev_priv->psr.active) {
-		I915_WRITE(EDP_PSR_CTL(dev),
-			   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+		I915_WRITE(EDP_PSR_CTL,
+			   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
 
 		/* Wait till PSR is idle */
-		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
+		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
 			       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
 			DRM_ERROR("Timed out waiting for PSR Idle State\n");
 
 		dev_priv->psr.active = false;
 	} else {
-		WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
 	}
 }
 
@@ -523,7 +547,7 @@ static void intel_psr_work(struct work_struct *work)
 	 * and be ready for re-enable.
 	 */
 	if (HAS_DDI(dev_priv->dev)) {
-		if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
+		if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
 			      EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
 			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
 			return;
@@ -566,11 +590,11 @@ static void intel_psr_exit(struct drm_device *dev)
 		return;
 
 	if (HAS_DDI(dev)) {
-		val = I915_READ(EDP_PSR_CTL(dev));
+		val = I915_READ(EDP_PSR_CTL);
 
 		WARN_ON(!(val & EDP_PSR_ENABLE));
 
-		I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
+		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
 	} else {
 		val = I915_READ(VLV_PSRCTL(pipe));
 
@@ -700,7 +724,6 @@ void intel_psr_flush(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	enum pipe pipe;
-	int delay_ms = HAS_DDI(dev) ? 100 : 500;
 
 	mutex_lock(&dev_priv->psr.lock);
 	if (!dev_priv->psr.enabled) {
@@ -735,8 +758,9 @@ void intel_psr_flush(struct drm_device *dev,
 	}
 
 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
-		schedule_delayed_work(&dev_priv->psr.work,
-				      msecs_to_jiffies(delay_ms));
+		if (!work_busy(&dev_priv->psr.work.work))
+			schedule_delayed_work(&dev_priv->psr.work,
+					      msecs_to_jiffies(100));
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
@@ -751,6 +775,9 @@ void intel_psr_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
+		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
+
 	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
 	mutex_init(&dev_priv->psr.lock);
 }
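
The `(dev)` parameter disappears from EDP_PSR_CTL and friends because intel_psr_init() now caches the platform's PSR register base once. A sketch of how the register definitions can then be written relative to that base (the offsets below are illustrative assumptions, not the authoritative layout):

    /* Sketch: PSR block sits at a platform-dependent base, cached
     * at init time, so the registers need no dev argument. */
    #define HSW_EDP_PSR_BASE    0x64800
    #define BDW_EDP_PSR_BASE    0x6f800
    #define EDP_PSR_CTL         _MMIO(dev_priv->psr_mmio_base + 0)
    #define EDP_PSR_STATUS_CTL  _MMIO(dev_priv->psr_mmio_base + 0x40)
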
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -481,7 +481,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	u32 mmio = 0;
+	i915_reg_t mmio;
 
 	/* The ring status page addresses are no longer next to the rest of
 	 * the ring registers as of gen7.
@@ -524,7 +524,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
 	 * invalidating the TLB?
 	 */
 	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
-		u32 reg = RING_INSTPM(ring->mmio_base);
+		i915_reg_t reg = RING_INSTPM(ring->mmio_base);
 
 		/* ring should be idle before issuing a sync flush*/
 		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
@@ -733,7 +733,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit(ring, w->reg[i].addr);
+		intel_ring_emit_reg(ring, w->reg[i].addr);
 		intel_ring_emit(ring, w->reg[i].value);
 	}
 	intel_ring_emit(ring, MI_NOOP);
@@ -766,7 +766,8 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
 }
 
 static int wa_add(struct drm_i915_private *dev_priv,
-		  const u32 addr, const u32 mask, const u32 val)
+		  i915_reg_t addr,
+		  const u32 mask, const u32 val)
 {
 	const u32 idx = dev_priv->workarounds.count;
 
@@ -924,17 +925,15 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 
-	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 ||
-	    INTEL_REVID(dev) == SKL_REVID_B0)) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
-		/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
+	/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
 				  GEN9_DG_MIRROR_FIX_ENABLE);
-	}
 
-	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
-		/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
 				  GEN9_RHWO_OPTIMIZATION_DISABLE);
 		/*
@@ -944,12 +943,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 		 */
 	}
 
-	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
-	    IS_BROXTON(dev)) {
-		/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
+	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
+	if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
 		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
 				  GEN9_ENABLE_YV12_BUGFIX);
-	}
 
 	/* Wa4x4STCOptimizationDisable:skl,bxt */
 	/* WaDisablePartialResolveInVc:skl,bxt */
@@ -961,24 +958,22 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 			  GEN9_CCS_TLB_PREFETCH_ENABLE);
 
 	/* WaDisableMaskBasedCammingInRCC:skl,bxt */
-	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_C0) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0))
+	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
 				  PIXEL_MASK_CAMMING_DISABLE);
 
 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
 	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_F0) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) >= BXT_REVID_B0))
+	if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
+	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
 		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
 	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
 
 	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
-	if (IS_SKYLAKE(dev) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_B0)) {
+	if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
 		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 				  GEN8_SAMPLER_POWER_BYPASS_DIS);
-	}
 
 	/* WaDisableSTUnitPowerOptimization:skl,bxt */
 	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
@@ -1038,7 +1033,7 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
 	if (ret)
 		return ret;
 
-	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
 		/* WaDisableHDCInvalidation:skl */
 		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
 			   BDW_DISABLE_HDC_INVALIDATION);
@@ -1051,23 +1046,23 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
 	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
 	 * involving this register should also be added to WA batch as required.
 	 */
-	if (INTEL_REVID(dev) <= SKL_REVID_E0)
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
 		/* WaDisableLSQCROPERFforOCL:skl */
 		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
 			   GEN8_LQSC_RO_PERF_DIS);
 
 	/* WaEnableGapsTsvCreditFix:skl */
-	if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
+	if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
 		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
 					   GEN9_GAPS_TSV_CREDIT_DISABLE));
 	}
 
 	/* WaDisablePowerCompilerClockGating:skl */
-	if (INTEL_REVID(dev) == SKL_REVID_B0)
+	if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
 				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
 		/*
 		 *Use Force Non-Coherent whenever executing a 3D context. This
 		 * is a workaround for a possible hang in the unlikely event
@@ -1078,19 +1073,17 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
 			   HDC_FORCE_NON_COHERENT);
 	}
 
-	if (INTEL_REVID(dev) == SKL_REVID_C0 ||
-	    INTEL_REVID(dev) == SKL_REVID_D0)
-		/* WaBarrierPerformanceFixDisable:skl */
+	/* WaBarrierPerformanceFixDisable:skl */
+	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
 		WA_SET_BIT_MASKED(HDC_CHICKEN0,
 				  HDC_FENCE_DEST_SLM_DISABLE |
 				  HDC_BARRIER_PERFORMANCE_DISABLE);
 
 	/* WaDisableSbeCacheDispatchPortSharing:skl */
-	if (INTEL_REVID(dev) <= SKL_REVID_F0) {
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
 		WA_SET_BIT_MASKED(
 			GEN7_HALF_SLICE_CHICKEN1,
 			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-	}
 
 	return skl_tune_iz_hashing(ring);
 }
@@ -1107,11 +1100,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
 
 	/* WaStoreMultiplePTEenable:bxt */
 	/* This is a requirement according to Hardware specification */
-	if (INTEL_REVID(dev) == BXT_REVID_A0)
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
 
 	/* WaSetClckGatingDisableMedia:bxt */
-	if (INTEL_REVID(dev) == BXT_REVID_A0) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
 					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
 	}
@@ -1121,7 +1114,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
 			  STALL_DOP_GATING_DISABLE);
 
 	/* WaDisableSbeCacheDispatchPortSharing:bxt */
-	if (INTEL_REVID(dev) <= BXT_REVID_B0) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
 		WA_SET_BIT_MASKED(
 			GEN7_HALF_SLICE_CHICKEN1,
 			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1319,11 +1312,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		return ret;
 
 	for_each_ring(useless, dev_priv, i) {
-		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
-		if (mbox_reg != GEN6_NOSYNC) {
+		i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[i];
+
+		if (i915_mmio_reg_valid(mbox_reg)) {
 			u32 seqno = i915_gem_request_get_seqno(signaller_req);
+
 			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
-			intel_ring_emit(signaller, mbox_reg);
+			intel_ring_emit_reg(signaller, mbox_reg);
 			intel_ring_emit(signaller, seqno);
 		}
 	}
@@ -2004,11 +1999,35 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
 
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
-	iounmap(ringbuf->virtual_start);
+	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
+		vunmap(ringbuf->virtual_start);
+	else
+		iounmap(ringbuf->virtual_start);
 	ringbuf->virtual_start = NULL;
 	i915_gem_object_ggtt_unpin(ringbuf->obj);
 }
 
+static u32 *vmap_obj(struct drm_i915_gem_object *obj)
+{
+	struct sg_page_iter sg_iter;
+	struct page **pages;
+	void *addr;
+	int i;
+
+	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+	if (pages == NULL)
+		return NULL;
+
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
+		pages[i++] = sg_page_iter_page(&sg_iter);
+
+	addr = vmap(pages, i, 0, PAGE_KERNEL);
+	drm_free_large(pages);
+
+	return addr;
+}
+
 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 				     struct intel_ringbuffer *ringbuf)
 {
@@ -2016,21 +2035,39 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 	struct drm_i915_gem_object *obj = ringbuf->obj;
 	int ret;
 
-	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
-	if (ret)
-		return ret;
+	if (HAS_LLC(dev_priv) && !obj->stolen) {
+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
+		if (ret)
+			return ret;
 
-	ret = i915_gem_object_set_to_gtt_domain(obj, true);
-	if (ret) {
-		i915_gem_object_ggtt_unpin(obj);
-		return ret;
-	}
+		ret = i915_gem_object_set_to_cpu_domain(obj, true);
+		if (ret) {
+			i915_gem_object_ggtt_unpin(obj);
+			return ret;
+		}
 
-	ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
-			i915_gem_obj_ggtt_offset(obj), ringbuf->size);
-	if (ringbuf->virtual_start == NULL) {
-		i915_gem_object_ggtt_unpin(obj);
-		return -EINVAL;
+		ringbuf->virtual_start = vmap_obj(obj);
+		if (ringbuf->virtual_start == NULL) {
+			i915_gem_object_ggtt_unpin(obj);
+			return -ENOMEM;
+		}
+	} else {
+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+		if (ret)
+			return ret;
+
+		ret = i915_gem_object_set_to_gtt_domain(obj, true);
+		if (ret) {
+			i915_gem_object_ggtt_unpin(obj);
+			return ret;
+		}
+
+		ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+						    i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+		if (ringbuf->virtual_start == NULL) {
+			i915_gem_object_ggtt_unpin(obj);
+			return -EINVAL;
+		}
 	}
 
 	return 0;
@@ -2070,10 +2107,14 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
 	int ret;
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-	if (ring == NULL)
+	if (ring == NULL) {
+		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
+				 engine->name);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	ring->ring = engine;
+	list_add(&ring->link, &engine->buffers);
 
 	ring->size = size;
 	/* Workaround an erratum on the i830 which causes a hang if
@@ -2089,8 +2130,9 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
 
 	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
 	if (ret) {
-		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
-			  engine->name, ret);
+		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
+				 engine->name, ret);
+		list_del(&ring->link);
 		kfree(ring);
 		return ERR_PTR(ret);
 	}
@@ -2102,6 +2144,7 @@ void
 intel_ringbuffer_free(struct intel_ringbuffer *ring)
 {
 	intel_destroy_ringbuffer_obj(ring);
+	list_del(&ring->link);
 	kfree(ring);
 }
 
@@ -2117,6 +2160,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->execlist_queue);
+	INIT_LIST_HEAD(&ring->buffers);
 	i915_gem_batch_pool_init(dev, &ring->batch_pool);
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
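
gen6_signal() above no longer compares against the GEN6_NOSYNC magic value; with typed registers, "no mailbox here" is expressed as an invalid register handle instead. A sketch of the check this relies on, assuming offset zero is reserved as the sentinel:

    /* Sketch: offset 0 doubles as "no register here". */
    #define INVALID_MMIO_REG    _MMIO(0)

    static inline bool i915_mmio_reg_valid(i915_reg_t reg)
    {
        return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
    }

The same sentinel is what mocs_register() returns for an unknown ring in the intel_mocs.c hunk earlier.
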
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -100,6 +100,7 @@ struct intel_ringbuffer {
 	void __iomem *virtual_start;
 
 	struct intel_engine_cs *ring;
+	struct list_head link;
 
 	u32 head;
 	u32 tail;
@@ -157,6 +158,7 @@ struct intel_engine_cs {
 	u32		mmio_base;
 	struct		drm_device *dev;
 	struct intel_ringbuffer *buffer;
+	struct list_head buffers;
 
 	/*
	 * A pool of objects to use as shadow copies of client batch buffers
@@ -247,7 +249,7 @@ struct intel_engine_cs {
 		/* our mbox written by others */
 		u32		wait[I915_NUM_RINGS];
 		/* mboxes this ring signals to */
-		u32		signal[I915_NUM_RINGS];
+		i915_reg_t	signal[I915_NUM_RINGS];
 	} mbox;
 	u64		signal_ggtt[I915_NUM_RINGS];
 };
@@ -441,6 +443,11 @@ static inline void intel_ring_emit(struct intel_engine_cs *ring,
 	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
 	ringbuf->tail += 4;
 }
+static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
+				       i915_reg_t reg)
+{
+	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
+}
 static inline void intel_ring_advance(struct intel_engine_cs *ring)
 {
 	struct intel_ringbuffer *ringbuf = ring->buffer;
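
The new intel_ring_emit_reg() mirrors the execlists helper, so every LRI-style register write from a legacy ring follows the same three-step shape: LRI header, typed register, plain value. For example, replaying the workaround list (as in intel_ring_workarounds_emit() above) becomes:

    intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
    for (i = 0; i < w->count; i++) {
        intel_ring_emit_reg(ring, w->reg[i].addr);  /* i915_reg_t */
        intel_ring_emit(ring, w->reg[i].value);     /* raw u32    */
    }
    intel_ring_emit(ring, MI_NOOP);

Passing the two arguments in the wrong order is now a compile error rather than a silent GPU hang.
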
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -49,9 +49,6 @@
  * present for a given platform.
  */
 
-#define GEN9_ENABLE_DC5(dev) 0
-#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev)
-
 #define for_each_power_well(i, power_well, domain_mask, power_domains)	\
 	for (i = 0;							\
 	     i < (power_domains)->power_well_count &&			\
@@ -244,12 +241,6 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
 		gen8_irq_power_well_post_enable(dev_priv,
 						1 << PIPE_C | 1 << PIPE_B);
 	}
-
-	if (power_well->data == SKL_DISP_PW_1) {
-		if (!dev_priv->power_domains.initializing)
-			intel_prepare_ddi(dev);
-		gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
-	}
 }
 
 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
@@ -292,58 +283,38 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
 	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
 	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
 	BIT(POWER_DOMAIN_AUX_B) |			\
 	BIT(POWER_DOMAIN_AUX_C) |			\
 	BIT(POWER_DOMAIN_AUX_D) |			\
 	BIT(POWER_DOMAIN_AUDIO) |			\
 	BIT(POWER_DOMAIN_VGA) |				\
 	BIT(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
-	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
-	BIT(POWER_DOMAIN_PLLS) |			\
-	BIT(POWER_DOMAIN_PIPE_A) |			\
-	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
-	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
-	BIT(POWER_DOMAIN_AUX_A) |			\
-	BIT(POWER_DOMAIN_INIT))
 #define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
-	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
 	BIT(POWER_DOMAIN_INIT))
 #define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
-	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
 	BIT(POWER_DOMAIN_INIT))
 #define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
-	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
 	BIT(POWER_DOMAIN_INIT))
 #define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
-	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
 	BIT(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS (		\
-	SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |		\
-	BIT(POWER_DOMAIN_PLLS) |			\
+#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
+	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
+	BIT(POWER_DOMAIN_MODESET) |			\
+	BIT(POWER_DOMAIN_AUX_A) |			\
 	BIT(POWER_DOMAIN_INIT))
 #define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
-	(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
+	(POWER_DOMAIN_MASK & ~(				\
+	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
 	SKL_DISPLAY_DDI_A_E_POWER_DOMAINS |		\
 	SKL_DISPLAY_DDI_B_POWER_DOMAINS |		\
 	SKL_DISPLAY_DDI_C_POWER_DOMAINS |		\
 	SKL_DISPLAY_DDI_D_POWER_DOMAINS |		\
-	SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) |		\
+	SKL_DISPLAY_DC_OFF_POWER_DOMAINS)) |		\
 	BIT(POWER_DOMAIN_INIT))
 
 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
@@ -354,25 +325,28 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
 	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
 	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
 	BIT(POWER_DOMAIN_AUX_B) |			\
 	BIT(POWER_DOMAIN_AUX_C) |			\
 	BIT(POWER_DOMAIN_AUDIO) |			\
 	BIT(POWER_DOMAIN_VGA) |				\
+	BIT(POWER_DOMAIN_GMBUS) |			\
 	BIT(POWER_DOMAIN_INIT))
 #define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
 	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
 	BIT(POWER_DOMAIN_PIPE_A) |			\
 	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
 	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
 	BIT(POWER_DOMAIN_AUX_A) |			\
 	BIT(POWER_DOMAIN_PLLS) |			\
 	BIT(POWER_DOMAIN_INIT))
+#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
+	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
+	BIT(POWER_DOMAIN_MODESET) |			\
+	BIT(POWER_DOMAIN_AUX_A) |			\
+	BIT(POWER_DOMAIN_INIT))
 #define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
 	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
 	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |	\
@@ -416,34 +390,6 @@ static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
 	  */
 }
 
-void bxt_enable_dc9(struct drm_i915_private *dev_priv)
-{
-	uint32_t val;
-
-	assert_can_enable_dc9(dev_priv);
-
-	DRM_DEBUG_KMS("Enabling DC9\n");
-
-	val = I915_READ(DC_STATE_EN);
-	val |= DC_STATE_EN_DC9;
-	I915_WRITE(DC_STATE_EN, val);
-	POSTING_READ(DC_STATE_EN);
-}
-
-void bxt_disable_dc9(struct drm_i915_private *dev_priv)
-{
-	uint32_t val;
-
-	assert_can_disable_dc9(dev_priv);
-
-	DRM_DEBUG_KMS("Disabling DC9\n");
-
-	val = I915_READ(DC_STATE_EN);
-	val &= ~DC_STATE_EN_DC9;
-	I915_WRITE(DC_STATE_EN, val);
-	POSTING_READ(DC_STATE_EN);
-}
-
 static void gen9_set_dc_state_debugmask_memory_up(
 			struct drm_i915_private *dev_priv)
 {
@@ -458,6 +404,62 @@ static void gen9_set_dc_state_debugmask_memory_up(
 	}
 }
 
+static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
+{
+	uint32_t val;
+	uint32_t mask;
+
+	mask = DC_STATE_EN_UPTO_DC5;
+	if (IS_BROXTON(dev_priv))
+		mask |= DC_STATE_EN_DC9;
+	else
+		mask |= DC_STATE_EN_UPTO_DC6;
+
+	WARN_ON_ONCE(state & ~mask);
+
+	if (i915.enable_dc == 0)
+		state = DC_STATE_DISABLE;
+	else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
+		state = DC_STATE_EN_UPTO_DC5;
+
+	if (state & DC_STATE_EN_UPTO_DC5_DC6_MASK)
+		gen9_set_dc_state_debugmask_memory_up(dev_priv);
+
+	val = I915_READ(DC_STATE_EN);
+	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
+		      val & mask, state);
+	val &= ~mask;
+	val |= state;
+	I915_WRITE(DC_STATE_EN, val);
+	POSTING_READ(DC_STATE_EN);
+}
+
+void bxt_enable_dc9(struct drm_i915_private *dev_priv)
+{
+	assert_can_enable_dc9(dev_priv);
+
+	DRM_DEBUG_KMS("Enabling DC9\n");
+
+	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
+}
+
+void bxt_disable_dc9(struct drm_i915_private *dev_priv)
+{
+	assert_can_disable_dc9(dev_priv);
+
+	DRM_DEBUG_KMS("Disabling DC9\n");
+
+	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+}
+
+static void assert_csr_loaded(struct drm_i915_private *dev_priv)
+{
+	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
+		  "CSR program storage start is NULL\n");
+	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
+	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
+}
+
 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
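
gen9_set_dc_state() concentrates what used to be five near-identical read-modify-write sequences: it builds the mask of DC bits that are legal for the platform, clamps the requested state against the i915.enable_dc module parameter, and performs a single posted write. The core of it, condensed from the hunk above:

    /* One RMW of DC_STATE_EN, constrained to the platform-legal mask. */
    mask = DC_STATE_EN_UPTO_DC5 |
           (IS_BROXTON(dev_priv) ? DC_STATE_EN_DC9 : DC_STATE_EN_UPTO_DC6);
    WARN_ON_ONCE(state & ~mask);

    val = I915_READ(DC_STATE_EN);
    val = (val & ~mask) | state;
    I915_WRITE(DC_STATE_EN, val);
    POSTING_READ(DC_STATE_EN);

Funnelling every caller through one helper also means the debug trace and the enable_dc policy can never be skipped by an individual enable/disable path.
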
@@ -478,8 +480,6 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
 
 static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
 {
-	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
-							SKL_DISP_PW_2);
 	/*
 	 * During initialization, the firmware may not be loaded yet.
 	 * We still want to make sure that the DC enabling flag is cleared.
@@ -487,40 +487,17 @@ static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
 	if (dev_priv->power_domains.initializing)
 		return;
 
-	WARN_ONCE(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
 	WARN_ONCE(dev_priv->pm.suspended,
 		  "Disabling of DC5 while platform is runtime-suspended should never happen.\n");
 }
 
 static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
 {
-	uint32_t val;
-
 	assert_can_enable_dc5(dev_priv);
 
 	DRM_DEBUG_KMS("Enabling DC5\n");
 
-	gen9_set_dc_state_debugmask_memory_up(dev_priv);
-
-	val = I915_READ(DC_STATE_EN);
-	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
-	val |= DC_STATE_EN_UPTO_DC5;
-	I915_WRITE(DC_STATE_EN, val);
-	POSTING_READ(DC_STATE_EN);
-}
-
-static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
-{
-	uint32_t val;
-
-	assert_can_disable_dc5(dev_priv);
-
-	DRM_DEBUG_KMS("Disabling DC5\n");
-
-	val = I915_READ(DC_STATE_EN);
-	val &= ~DC_STATE_EN_UPTO_DC5;
-	I915_WRITE(DC_STATE_EN, val);
-	POSTING_READ(DC_STATE_EN);
+	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
 }
 
 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
@@ -546,40 +523,37 @@ static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
 	if (dev_priv->power_domains.initializing)
 		return;
 
-	assert_csr_loaded(dev_priv);
 	WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
 		  "DC6 already programmed to be disabled.\n");
 }
 
-static void skl_enable_dc6(struct drm_i915_private *dev_priv)
+static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv)
 {
-	uint32_t val;
+	assert_can_disable_dc5(dev_priv);
 
+	if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
+		assert_can_disable_dc6(dev_priv);
+
+	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+}
+
+void skl_enable_dc6(struct drm_i915_private *dev_priv)
+{
 	assert_can_enable_dc6(dev_priv);
 
 	DRM_DEBUG_KMS("Enabling DC6\n");
 
-	gen9_set_dc_state_debugmask_memory_up(dev_priv);
+	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 
-	val = I915_READ(DC_STATE_EN);
-	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
-	val |= DC_STATE_EN_UPTO_DC6;
-	I915_WRITE(DC_STATE_EN, val);
-	POSTING_READ(DC_STATE_EN);
 }
 
-static void skl_disable_dc6(struct drm_i915_private *dev_priv)
+void skl_disable_dc6(struct drm_i915_private *dev_priv)
 {
-	uint32_t val;
-
 	assert_can_disable_dc6(dev_priv);
 
 	DRM_DEBUG_KMS("Disabling DC6\n");
 
-	val = I915_READ(DC_STATE_EN);
-	val &= ~DC_STATE_EN_UPTO_DC6;
-	I915_WRITE(DC_STATE_EN, val);
-	POSTING_READ(DC_STATE_EN);
+	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 }
 
 static void skl_set_power_well(struct drm_i915_private *dev_priv,
@@ -629,20 +603,16 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
 				!I915_READ(HSW_PWR_WELL_BIOS),
 				"Invalid for power well status to be enabled, unless done by the BIOS, \
 				when request is to disable!\n");
-			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
-			    power_well->data == SKL_DISP_PW_2) {
-				if (SKL_ENABLE_DC6(dev)) {
-					skl_disable_dc6(dev_priv);
-					/*
-					 * DDI buffer programming unnecessary during driver-load/resume
-					 * as it's already done during modeset initialization then.
-					 * It's also invalid here as encoder list is still uninitialized.
-					 */
-					if (!dev_priv->power_domains.initializing)
-						intel_prepare_ddi(dev);
-				} else {
-					gen9_disable_dc5(dev_priv);
-				}
+			if (power_well->data == SKL_DISP_PW_2) {
+				/*
+				 * DDI buffer programming unnecessary during
+				 * driver-load/resume as it's already done
+				 * during modeset initialization then. It's
+				 * also invalid here as encoder list is still
+				 * uninitialized.
+				 */
+				if (!dev_priv->power_domains.initializing)
+					intel_prepare_ddi(dev);
 			}
 			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
 		}
@@ -657,34 +627,9 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
 		}
 	} else {
 		if (enable_requested) {
-			if (IS_SKYLAKE(dev) &&
-			    (power_well->data == SKL_DISP_PW_1) &&
-			    (intel_csr_load_status_get(dev_priv) == FW_LOADED))
-				DRM_DEBUG_KMS("Not Disabling PW1, dmc will handle\n");
-			else {
-				I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
-				POSTING_READ(HSW_PWR_WELL_DRIVER);
-				DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
-			}
-
-			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
-			    power_well->data == SKL_DISP_PW_2) {
-				enum csr_state state;
-				/* TODO: wait for a completion event or
-				 * similar here instead of busy
-				 * waiting using wait_for function.
-				 */
-				wait_for((state = intel_csr_load_status_get(dev_priv)) !=
-						FW_UNINITIALIZED, 1000);
-				if (state != FW_LOADED)
-					DRM_DEBUG("CSR firmware not ready (%d)\n",
-							state);
-				else
-					if (SKL_ENABLE_DC6(dev))
-						skl_enable_dc6(dev_priv);
-					else
-						gen9_enable_dc5(dev_priv);
-			}
+			I915_WRITE(HSW_PWR_WELL_DRIVER,	tmp & ~req_mask);
+			POSTING_READ(HSW_PWR_WELL_DRIVER);
+			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
 		}
 	}
 
@@ -759,6 +704,41 @@ static void skl_power_well_disable(struct drm_i915_private *dev_priv,
 	skl_set_power_well(dev_priv, power_well, false);
 }
 
+static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
+					   struct i915_power_well *power_well)
+{
+	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
+}
+
+static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
+					  struct i915_power_well *power_well)
+{
+	gen9_disable_dc5_dc6(dev_priv);
+}
+
+static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
+					   struct i915_power_well *power_well)
+{
+	if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
+		skl_enable_dc6(dev_priv);
+	else
+		gen9_enable_dc5(dev_priv);
+}
+
+static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
+					   struct i915_power_well *power_well)
+{
+	if (power_well->count > 0) {
+		gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+	} else {
+		if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 &&
+		    i915.enable_dc != 1)
+			gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+		else
+			gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
+	}
+}
+
 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
 					   struct i915_power_well *power_well)
 {
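
Note the inverted polarity of the new DC-off well: "enabling" it blocks DC5/DC6 so the display power stays fully up, while "disabling" it re-allows the deepest DC state that the SKL/BXT platform and the i915.enable_dc parameter permit. sync_hw() simply re-applies whichever side the reference count is currently on; condensed from the hunk above:

    /* Refcount held -> no DC states; refcount dropped -> deepest
     * permitted DC state. */
    if (power_well->count > 0)
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
    else if (IS_SKYLAKE(dev_priv) &&
             i915.enable_dc != 0 && i915.enable_dc != 1)
        gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
    else
        gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);

Modelling DC gating as a power well lets the existing domain refcounting decide when the DMC may engage DC5/DC6, instead of hand-rolled checks in skl_set_power_well().
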
@@ -973,10 +953,12 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
 					 int power_well_id)
 {
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
-	struct i915_power_well *power_well;
 	int i;
 
-	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+	for (i = 0; i < power_domains->power_well_count; i++) {
+		struct i915_power_well *power_well;
+
+		power_well = &power_domains->power_wells[i];
 		if (power_well->data == power_well_id)
 			return power_well;
 	}
@@ -1457,7 +1439,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
 		WARN_ON(!power_well->count);
 
-		if (!--power_well->count && i915.disable_power_well)
+		if (!--power_well->count)
 			intel_power_well_disable(dev_priv, power_well);
 	}
 
@@ -1469,20 +1451,17 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 #define HSW_ALWAYS_ON_POWER_DOMAINS (			\
 	BIT(POWER_DOMAIN_PIPE_A) |			\
 	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
 	BIT(POWER_DOMAIN_PORT_CRT) |			\
 	BIT(POWER_DOMAIN_PLLS) |			\
 	BIT(POWER_DOMAIN_AUX_A) |			\
 	BIT(POWER_DOMAIN_AUX_B) |			\
 	BIT(POWER_DOMAIN_AUX_C) |			\
 	BIT(POWER_DOMAIN_AUX_D) |			\
+	BIT(POWER_DOMAIN_GMBUS) |			\
 	BIT(POWER_DOMAIN_INIT))
 #define HSW_DISPLAY_POWER_DOMAINS (				\
 	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
@@ -1499,49 +1478,42 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 #define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK
 
 #define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
-	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
-	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
-	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
-	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
 	BIT(POWER_DOMAIN_PORT_CRT) |		\
 	BIT(POWER_DOMAIN_AUX_B) |		\
 	BIT(POWER_DOMAIN_AUX_C) |		\
 	BIT(POWER_DOMAIN_INIT))
 
 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
-	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
-	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
 	BIT(POWER_DOMAIN_AUX_B) |		\
 	BIT(POWER_DOMAIN_INIT))
 
 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
-	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
 	BIT(POWER_DOMAIN_AUX_B) |		\
 	BIT(POWER_DOMAIN_INIT))
 
 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
-	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
-	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
 	BIT(POWER_DOMAIN_AUX_C) |		\
 	BIT(POWER_DOMAIN_INIT))
 
 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
-	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
 	BIT(POWER_DOMAIN_AUX_C) |		\
 	BIT(POWER_DOMAIN_INIT))
 
 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
-	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
-	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
-	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
-	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
 	BIT(POWER_DOMAIN_AUX_B) |		\
 	BIT(POWER_DOMAIN_AUX_C) |		\
 	BIT(POWER_DOMAIN_INIT))
 
 #define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
-	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
-	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
 	BIT(POWER_DOMAIN_AUX_D) |		\
 	BIT(POWER_DOMAIN_INIT))
 
@@ -1589,6 +1561,13 @@ static const struct i915_power_well_ops skl_power_well_ops = {
 	.is_enabled = skl_power_well_enabled,
 };
 
+static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
+	.sync_hw = gen9_dc_off_power_well_sync_hw,
+	.enable = gen9_dc_off_power_well_enable,
+	.disable = gen9_dc_off_power_well_disable,
+	.is_enabled = gen9_dc_off_power_well_enabled,
|
||||
};
|
||||
|
||||
static struct i915_power_well hsw_power_wells[] = {
|
||||
{
|
||||
.name = "always-on",
|
||||
@ -1644,6 +1623,7 @@ static struct i915_power_well vlv_power_wells[] = {
|
||||
.always_on = 1,
|
||||
.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
|
||||
.ops = &i9xx_always_on_power_well_ops,
|
||||
.data = PUNIT_POWER_WELL_ALWAYS_ON,
|
||||
},
|
||||
{
|
||||
.name = "display",
|
||||
@ -1745,19 +1725,28 @@ static struct i915_power_well skl_power_wells[] = {
|
||||
.always_on = 1,
|
||||
.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
|
||||
.ops = &i9xx_always_on_power_well_ops,
|
||||
.data = SKL_DISP_PW_ALWAYS_ON,
|
||||
},
|
||||
{
|
||||
.name = "power well 1",
|
||||
.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
|
||||
/* Handled by the DMC firmware */
|
||||
.domains = 0,
|
||||
.ops = &skl_power_well_ops,
|
||||
.data = SKL_DISP_PW_1,
|
||||
},
|
||||
{
|
||||
.name = "MISC IO power well",
|
||||
.domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
|
||||
/* Handled by the DMC firmware */
|
||||
.domains = 0,
|
||||
.ops = &skl_power_well_ops,
|
||||
.data = SKL_DISP_PW_MISC_IO,
|
||||
},
|
||||
{
|
||||
.name = "DC off",
|
||||
.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
|
||||
.ops = &gen9_dc_off_power_well_ops,
|
||||
.data = SKL_DISP_PW_DC_OFF,
|
||||
},
|
||||
{
|
||||
.name = "power well 2",
|
||||
.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
|
||||
@ -1790,6 +1779,34 @@ static struct i915_power_well skl_power_wells[] = {
|
||||
},
|
||||
};
|
||||
|
||||
void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct i915_power_well *well;
|
||||
|
||||
if (!IS_SKYLAKE(dev_priv))
|
||||
return;
|
||||
|
||||
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
|
||||
intel_power_well_enable(dev_priv, well);
|
||||
|
||||
well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
|
||||
intel_power_well_enable(dev_priv, well);
|
||||
}
|
||||
|
||||
void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct i915_power_well *well;
|
||||
|
||||
if (!IS_SKYLAKE(dev_priv))
|
||||
return;
|
||||
|
||||
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
|
||||
intel_power_well_disable(dev_priv, well);
|
||||
|
||||
well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
|
||||
intel_power_well_disable(dev_priv, well);
|
||||
}
|
||||
|
||||
static struct i915_power_well bxt_power_wells[] = {
|
||||
{
|
||||
.name = "always-on",
|
||||
@ -1803,12 +1820,18 @@ static struct i915_power_well bxt_power_wells[] = {
|
||||
.ops = &skl_power_well_ops,
|
||||
.data = SKL_DISP_PW_1,
|
||||
},
|
||||
{
|
||||
.name = "DC off",
|
||||
.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
|
||||
.ops = &gen9_dc_off_power_well_ops,
|
||||
.data = SKL_DISP_PW_DC_OFF,
|
||||
},
|
||||
{
|
||||
.name = "power well 2",
|
||||
.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
|
||||
.ops = &skl_power_well_ops,
|
||||
.data = SKL_DISP_PW_2,
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
static int
|
||||
@ -1845,6 +1868,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
|
||||
i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
|
||||
i915.disable_power_well);
|
||||
|
||||
BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
|
||||
|
||||
mutex_init(&power_domains->lock);
|
||||
|
||||
/*
|
||||
@ -1855,7 +1880,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
|
||||
set_power_wells(power_domains, hsw_power_wells);
|
||||
} else if (IS_BROADWELL(dev_priv->dev)) {
|
||||
set_power_wells(power_domains, bdw_power_wells);
|
||||
} else if (IS_SKYLAKE(dev_priv->dev)) {
|
||||
} else if (IS_SKYLAKE(dev_priv->dev) || IS_KABYLAKE(dev_priv->dev)) {
|
||||
set_power_wells(power_domains, skl_power_wells);
|
||||
} else if (IS_BROXTON(dev_priv->dev)) {
|
||||
set_power_wells(power_domains, bxt_power_wells);
|
||||
@ -1870,21 +1895,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct device *device = &dev->pdev->dev;
|
||||
|
||||
if (!HAS_RUNTIME_PM(dev))
|
||||
return;
|
||||
|
||||
if (!intel_enable_rc6(dev))
|
||||
return;
|
||||
|
||||
/* Make sure we're not suspended first. */
|
||||
pm_runtime_get_sync(device);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_power_domains_fini - finalizes the power domain structures
|
||||
* @dev_priv: i915 device instance
|
||||
@ -1895,15 +1905,17 @@ static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
intel_runtime_pm_disable(dev_priv);
|
||||
|
||||
/* The i915.ko module is still not prepared to be loaded when
|
||||
* the power well is not enabled, so just enable it in case
|
||||
* we're going to unload/reload. */
|
||||
intel_display_set_init_power(dev_priv, true);
|
||||
|
||||
/* Remove the refcount we took to keep power well support disabled. */
|
||||
if (!i915.disable_power_well)
|
||||
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
|
||||
}
|
||||
|
||||
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
|
||||
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct i915_power_domains *power_domains = &dev_priv->power_domains;
|
||||
struct i915_power_well *power_well;
|
||||
@ -1918,6 +1930,47 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
|
||||
mutex_unlock(&power_domains->lock);
|
||||
}
|
||||
|
||||
static void skl_display_core_init(struct drm_i915_private *dev_priv,
|
||||
bool resume)
|
||||
{
|
||||
struct i915_power_domains *power_domains = &dev_priv->power_domains;
|
||||
uint32_t val;
|
||||
|
||||
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
|
||||
|
||||
/* enable PCH reset handshake */
|
||||
val = I915_READ(HSW_NDE_RSTWRN_OPT);
|
||||
I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
|
||||
|
||||
/* enable PG1 and Misc I/O */
|
||||
mutex_lock(&power_domains->lock);
|
||||
skl_pw1_misc_io_init(dev_priv);
|
||||
mutex_unlock(&power_domains->lock);
|
||||
|
||||
if (!resume)
|
||||
return;
|
||||
|
||||
skl_init_cdclk(dev_priv);
|
||||
|
||||
if (dev_priv->csr.dmc_payload)
|
||||
intel_csr_load_program(dev_priv);
|
||||
}
|
||||
|
||||
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct i915_power_domains *power_domains = &dev_priv->power_domains;
|
||||
|
||||
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
|
||||
|
||||
skl_uninit_cdclk(dev_priv);
|
||||
|
||||
/* The spec doesn't call for removing the reset handshake flag */
|
||||
/* disable PG1 and Misc I/O */
|
||||
mutex_lock(&power_domains->lock);
|
||||
skl_pw1_misc_io_fini(dev_priv);
|
||||
mutex_unlock(&power_domains->lock);
|
||||
}
|
||||
|
||||
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct i915_power_well *cmn_bc =
|
||||
@ -2040,14 +2093,16 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
|
||||
* This function initializes the hardware power domain state and enables all
|
||||
* power domains using intel_display_set_init_power().
|
||||
*/
|
||||
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
|
||||
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct i915_power_domains *power_domains = &dev_priv->power_domains;
|
||||
|
||||
power_domains->initializing = true;
|
||||
|
||||
if (IS_CHERRYVIEW(dev)) {
|
||||
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
|
||||
skl_display_core_init(dev_priv, resume);
|
||||
} else if (IS_CHERRYVIEW(dev)) {
|
||||
mutex_lock(&power_domains->lock);
|
||||
chv_phy_control_init(dev_priv);
|
||||
mutex_unlock(&power_domains->lock);
|
||||
@ -2059,38 +2114,31 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
|
||||
|
||||
/* For now, we need the power well to be always enabled. */
|
||||
intel_display_set_init_power(dev_priv, true);
|
||||
intel_power_domains_resume(dev_priv);
|
||||
/* Disable power support if the user asked so. */
|
||||
if (!i915.disable_power_well)
|
||||
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
|
||||
intel_power_domains_sync_hw(dev_priv);
|
||||
power_domains->initializing = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_aux_display_runtime_get - grab an auxiliary power domain reference
|
||||
* intel_power_domains_suspend - suspend power domain state
|
||||
* @dev_priv: i915 device instance
|
||||
*
|
||||
* This function grabs a power domain reference for the auxiliary power domain
|
||||
* (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
|
||||
* parents are powered up. Therefore users should only grab a reference to the
|
||||
* innermost power domain they need.
|
||||
*
|
||||
* Any power domain reference obtained by this function must have a symmetric
|
||||
* call to intel_aux_display_runtime_put() to release the reference again.
|
||||
* This function prepares the hardware power domain state before entering
|
||||
* system suspend. It must be paired with intel_power_domains_init_hw().
|
||||
*/
|
||||
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
|
||||
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
intel_runtime_pm_get(dev_priv);
|
||||
}
|
||||
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
|
||||
skl_display_core_uninit(dev_priv);
|
||||
|
||||
/**
|
||||
* intel_aux_display_runtime_put - release an auxiliary power domain reference
|
||||
* @dev_priv: i915 device instance
|
||||
*
|
||||
* This function drops the auxiliary power domain reference obtained by
|
||||
* intel_aux_display_runtime_get() and might power down the corresponding
|
||||
* hardware block right away if this is the last reference.
|
||||
*/
|
||||
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
/*
|
||||
* Even if power well support was disabled we still want to disable
|
||||
* power wells while we are system suspended.
|
||||
*/
|
||||
if (!i915.disable_power_well)
|
||||
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -74,7 +74,7 @@ struct intel_sdvo {
|
||||
struct i2c_adapter ddc;
|
||||
|
||||
/* Register for the SDVO device: SDVOB or SDVOC */
|
||||
uint32_t sdvo_reg;
|
||||
i915_reg_t sdvo_reg;
|
||||
|
||||
/* Active outputs controlled by this SDVO output */
|
||||
uint16_t controlled_output;
|
||||
@ -120,8 +120,7 @@ struct intel_sdvo {
|
||||
*/
|
||||
bool is_tv;
|
||||
|
||||
/* On different gens SDVOB is at different places. */
|
||||
bool is_sdvob;
|
||||
enum port port;
|
||||
|
||||
/* This is for current tv format name */
|
||||
int tv_format_index;
|
||||
@ -245,7 +244,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
|
||||
u32 bval = val, cval = val;
|
||||
int i;
|
||||
|
||||
if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
|
||||
if (HAS_PCH_SPLIT(dev_priv)) {
|
||||
I915_WRITE(intel_sdvo->sdvo_reg, val);
|
||||
POSTING_READ(intel_sdvo->sdvo_reg);
|
||||
/*
|
||||
@ -259,7 +258,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
|
||||
return;
|
||||
}
|
||||
|
||||
if (intel_sdvo->sdvo_reg == GEN3_SDVOB)
|
||||
if (intel_sdvo->port == PORT_B)
|
||||
cval = I915_READ(GEN3_SDVOC);
|
||||
else
|
||||
bval = I915_READ(GEN3_SDVOB);
|
||||
@ -422,7 +421,7 @@ static const struct _sdvo_cmd_name {
|
||||
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
|
||||
};
|
||||
|
||||
#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? "SDVOB" : "SDVOC")
|
||||
#define SDVO_NAME(svdo) ((svdo)->port == PORT_B ? "SDVOB" : "SDVOC")
|
||||
|
||||
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
|
||||
const void *args, int args_len)
|
||||
@ -1282,14 +1281,10 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
|
||||
sdvox |= SDVO_BORDER_ENABLE;
|
||||
} else {
|
||||
sdvox = I915_READ(intel_sdvo->sdvo_reg);
|
||||
switch (intel_sdvo->sdvo_reg) {
|
||||
case GEN3_SDVOB:
|
||||
if (intel_sdvo->port == PORT_B)
|
||||
sdvox &= SDVOB_PRESERVE_MASK;
|
||||
break;
|
||||
case GEN3_SDVOC:
|
||||
else
|
||||
sdvox &= SDVOC_PRESERVE_MASK;
|
||||
break;
|
||||
}
|
||||
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
|
||||
}
|
||||
|
||||
@ -1464,12 +1459,23 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
|
||||
* matching DP port to be enabled on transcoder A.
|
||||
*/
|
||||
if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
|
||||
/*
|
||||
* We get CPU/PCH FIFO underruns on the other pipe when
|
||||
* doing the workaround. Sweep them under the rug.
|
||||
*/
|
||||
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
|
||||
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
|
||||
|
||||
temp &= ~SDVO_PIPE_B_SELECT;
|
||||
temp |= SDVO_ENABLE;
|
||||
intel_sdvo_write_sdvox(intel_sdvo, temp);
|
||||
|
||||
temp &= ~SDVO_ENABLE;
|
||||
intel_sdvo_write_sdvox(intel_sdvo, temp);
|
||||
|
||||
intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
|
||||
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
|
||||
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2251,7 +2257,7 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
|
||||
{
|
||||
struct sdvo_device_mapping *mapping;
|
||||
|
||||
if (sdvo->is_sdvob)
|
||||
if (sdvo->port == PORT_B)
|
||||
mapping = &(dev_priv->sdvo_mappings[0]);
|
||||
else
|
||||
mapping = &(dev_priv->sdvo_mappings[1]);
|
||||
@ -2269,7 +2275,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
|
||||
struct sdvo_device_mapping *mapping;
|
||||
u8 pin;
|
||||
|
||||
if (sdvo->is_sdvob)
|
||||
if (sdvo->port == PORT_B)
|
||||
mapping = &dev_priv->sdvo_mappings[0];
|
||||
else
|
||||
mapping = &dev_priv->sdvo_mappings[1];
|
||||
@ -2307,7 +2313,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct sdvo_device_mapping *my_mapping, *other_mapping;
|
||||
|
||||
if (sdvo->is_sdvob) {
|
||||
if (sdvo->port == PORT_B) {
|
||||
my_mapping = &dev_priv->sdvo_mappings[0];
|
||||
other_mapping = &dev_priv->sdvo_mappings[1];
|
||||
} else {
|
||||
@ -2332,7 +2338,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
|
||||
/* No SDVO device info is found for another DVO port,
|
||||
* so use mapping assumption we had before BIOS parsing.
|
||||
*/
|
||||
if (sdvo->is_sdvob)
|
||||
if (sdvo->port == PORT_B)
|
||||
return 0x70;
|
||||
else
|
||||
return 0x72;
|
||||
@ -2939,18 +2945,31 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
|
||||
return i2c_add_adapter(&sdvo->ddc) == 0;
|
||||
}
|
||||
|
||||
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
|
||||
static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
|
||||
enum port port)
|
||||
{
|
||||
if (HAS_PCH_SPLIT(dev_priv))
|
||||
WARN_ON(port != PORT_B);
|
||||
else
|
||||
WARN_ON(port != PORT_B && port != PORT_C);
|
||||
}
|
||||
|
||||
bool intel_sdvo_init(struct drm_device *dev,
|
||||
i915_reg_t sdvo_reg, enum port port)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_encoder *intel_encoder;
|
||||
struct intel_sdvo *intel_sdvo;
|
||||
int i;
|
||||
|
||||
assert_sdvo_port_valid(dev_priv, port);
|
||||
|
||||
intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
|
||||
if (!intel_sdvo)
|
||||
return false;
|
||||
|
||||
intel_sdvo->sdvo_reg = sdvo_reg;
|
||||
intel_sdvo->is_sdvob = is_sdvob;
|
||||
intel_sdvo->port = port;
|
||||
intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
|
||||
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo);
|
||||
if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
|
||||
@ -3000,8 +3019,10 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
|
||||
* hotplug lines.
|
||||
*/
|
||||
if (intel_sdvo->hotplug_active) {
|
||||
intel_encoder->hpd_pin =
|
||||
intel_sdvo->is_sdvob ? HPD_SDVO_B : HPD_SDVO_C;
|
||||
if (intel_sdvo->port == PORT_B)
|
||||
intel_encoder->hpd_pin = HPD_SDVO_B;
|
||||
else
|
||||
intel_encoder->hpd_pin = HPD_SDVO_C;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -192,10 +192,9 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
|
||||
const int pipe = intel_plane->pipe;
|
||||
const int plane = intel_plane->plane + 1;
|
||||
u32 plane_ctl, stride_div, stride;
|
||||
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
const struct drm_intel_sprite_colorkey *key =
|
||||
&to_intel_plane_state(drm_plane->state)->ckey;
|
||||
unsigned long surf_addr;
|
||||
u32 surf_addr;
|
||||
u32 tile_height, plane_offset, plane_size;
|
||||
unsigned int rotation;
|
||||
int x_offset, y_offset;
|
||||
@ -212,10 +211,6 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
|
||||
rotation = drm_plane->state->rotation;
|
||||
plane_ctl |= skl_plane_ctl_rotation(rotation);
|
||||
|
||||
intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
|
||||
pixel_size, true,
|
||||
src_w != crtc_w || src_h != crtc_h);
|
||||
|
||||
stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
|
||||
fb->pixel_format);
|
||||
|
||||
@ -297,8 +292,6 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
|
||||
|
||||
I915_WRITE(PLANE_SURF(pipe, plane), 0);
|
||||
POSTING_READ(PLANE_SURF(pipe, plane));
|
||||
|
||||
intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -541,10 +534,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
||||
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
|
||||
sprctl |= SPRITE_PIPE_CSC_ENABLE;
|
||||
|
||||
intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size,
|
||||
true,
|
||||
src_w != crtc_w || src_h != crtc_h);
|
||||
|
||||
/* Sizes are 0 based */
|
||||
src_w--;
|
||||
src_h--;
|
||||
@ -678,10 +667,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
||||
if (IS_GEN6(dev))
|
||||
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
|
||||
|
||||
intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
|
||||
pixel_size, true,
|
||||
src_w != crtc_w || src_h != crtc_h);
|
||||
|
||||
/* Sizes are 0 based */
|
||||
src_w--;
|
||||
src_h--;
|
||||
@ -938,9 +923,6 @@ intel_commit_sprite_plane(struct drm_plane *plane,
|
||||
|
||||
crtc = crtc ? crtc : plane->crtc;
|
||||
|
||||
if (!crtc->state->active)
|
||||
return;
|
||||
|
||||
if (state->visible) {
|
||||
intel_plane->update_plane(plane, crtc, fb,
|
||||
state->dst.x1, state->dst.y1,
|
||||
|
drivers/gpu/drm/i915/intel_uncore.c:

@@ -29,19 +29,7 @@

 #define FORCEWAKE_ACK_TIMEOUT_MS 50

-#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
-#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
-
-#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
-#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
-
-#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
-#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
-
-#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
-#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
-
-#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
+#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

 static const char * const forcewake_domain_names[] = {
     "render",
@@ -72,7 +60,7 @@ assert_device_not_suspended(struct drm_i915_private *dev_priv)
 static inline void
 fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
 {
-    WARN_ON(d->reg_set == 0);
+    WARN_ON(!i915_mmio_reg_valid(d->reg_set));
     __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
 }

@@ -118,7 +106,7 @@ static inline void
 fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
 {
     /* something from same cacheline, but not from the set register */
-    if (d->reg_post)
+    if (i915_mmio_reg_valid(d->reg_post))
         __raw_posting_read(d->i915, d->reg_post);
 }

@@ -525,8 +513,7 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
 }

 /* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(reg) \
-    ((reg) < 0x40000 && (reg) != FORCEWAKE)
+#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

 #define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

@@ -589,7 +576,7 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
     REG_RANGE((reg), 0x9400, 0x9800)

 #define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
-    ((reg) < 0x40000 &&\
+    ((reg) < 0x40000 && \
      !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
      !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
      !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
@@ -605,8 +592,8 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
 }

 static void
-hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
-                        bool before)
+hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+                        i915_reg_t reg, bool read, bool before)
 {
     const char *op = read ? "reading" : "writing to";
     const char *when = before ? "before" : "after";
@@ -616,7 +603,7 @@ hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,

     if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
         WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
-             when, op, reg);
+             when, op, i915_mmio_reg_offset(reg));
         __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
         i915.mmio_debug--; /* Only report the first N failures */
     }
@@ -649,7 +636,7 @@ hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)

 #define __gen2_read(x) \
 static u##x \
-gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
     GEN2_READ_HEADER(x); \
     val = __raw_i915_read##x(dev_priv, reg); \
     GEN2_READ_FOOTER; \
@@ -657,7 +644,7 @@ gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \

 #define __gen5_read(x) \
 static u##x \
-gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
     GEN2_READ_HEADER(x); \
     ilk_dummy_write(dev_priv); \
     val = __raw_i915_read##x(dev_priv, reg); \
@@ -680,6 +667,7 @@ __gen2_read(64)
 #undef GEN2_READ_HEADER

 #define GEN6_READ_HEADER(x) \
+    u32 offset = i915_mmio_reg_offset(reg); \
     unsigned long irqflags; \
     u##x val = 0; \
     assert_device_not_suspended(dev_priv); \
@@ -714,20 +702,12 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv,
         dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
 }

-#define __vgpu_read(x) \
-static u##x \
-vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
-    GEN6_READ_HEADER(x); \
-    val = __raw_i915_read##x(dev_priv, reg); \
-    GEN6_READ_FOOTER; \
-}
-
 #define __gen6_read(x) \
 static u##x \
-gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
     GEN6_READ_HEADER(x); \
     hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
-    if (NEEDS_FORCE_WAKE(reg)) \
+    if (NEEDS_FORCE_WAKE(offset)) \
         __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
     val = __raw_i915_read##x(dev_priv, reg); \
     hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
@@ -736,47 +716,56 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \

 #define __vlv_read(x) \
 static u##x \
-vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+    enum forcewake_domains fw_engine = 0; \
     GEN6_READ_HEADER(x); \
-    if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
-        __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
-    else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
-        __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
+    if (!NEEDS_FORCE_WAKE(offset)) \
+        fw_engine = 0; \
+    else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
+        fw_engine = FORCEWAKE_RENDER; \
+    else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
+        fw_engine = FORCEWAKE_MEDIA; \
+    if (fw_engine) \
+        __force_wake_get(dev_priv, fw_engine); \
     val = __raw_i915_read##x(dev_priv, reg); \
     GEN6_READ_FOOTER; \
 }

 #define __chv_read(x) \
 static u##x \
-chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+    enum forcewake_domains fw_engine = 0; \
     GEN6_READ_HEADER(x); \
-    if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
-        __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
-    else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
-        __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
-    else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
-        __force_wake_get(dev_priv, \
-                         FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
+    if (!NEEDS_FORCE_WAKE(offset)) \
+        fw_engine = 0; \
+    else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
+        fw_engine = FORCEWAKE_RENDER; \
+    else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
+        fw_engine = FORCEWAKE_MEDIA; \
+    else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
+        fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+    if (fw_engine) \
+        __force_wake_get(dev_priv, fw_engine); \
     val = __raw_i915_read##x(dev_priv, reg); \
     GEN6_READ_FOOTER; \
 }

 #define SKL_NEEDS_FORCE_WAKE(reg) \
     ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

 #define __gen9_read(x) \
 static u##x \
-gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
     enum forcewake_domains fw_engine; \
     GEN6_READ_HEADER(x); \
     hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
-    if (!SKL_NEEDS_FORCE_WAKE(reg)) \
+    if (!SKL_NEEDS_FORCE_WAKE(offset)) \
         fw_engine = 0; \
-    else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
+    else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
         fw_engine = FORCEWAKE_RENDER; \
-    else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
+    else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
         fw_engine = FORCEWAKE_MEDIA; \
-    else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
+    else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
         fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
     else \
         fw_engine = FORCEWAKE_BLITTER; \
@@ -787,10 +776,6 @@ gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
     GEN6_READ_FOOTER; \
 }

-__vgpu_read(8)
-__vgpu_read(16)
-__vgpu_read(32)
-__vgpu_read(64)
 __gen9_read(8)
 __gen9_read(16)
 __gen9_read(32)
@@ -812,10 +797,37 @@ __gen6_read(64)
 #undef __chv_read
 #undef __vlv_read
 #undef __gen6_read
-#undef __vgpu_read
 #undef GEN6_READ_FOOTER
 #undef GEN6_READ_HEADER

+#define VGPU_READ_HEADER(x) \
+    unsigned long irqflags; \
+    u##x val = 0; \
+    assert_device_not_suspended(dev_priv); \
+    spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define VGPU_READ_FOOTER \
+    spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+    trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+    return val
+
+#define __vgpu_read(x) \
+static u##x \
+vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+    VGPU_READ_HEADER(x); \
+    val = __raw_i915_read##x(dev_priv, reg); \
+    VGPU_READ_FOOTER; \
+}
+
+__vgpu_read(8)
+__vgpu_read(16)
+__vgpu_read(32)
+__vgpu_read(64)
+
+#undef __vgpu_read
+#undef VGPU_READ_FOOTER
+#undef VGPU_READ_HEADER
+
 #define GEN2_WRITE_HEADER \
     trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
     assert_device_not_suspended(dev_priv); \
@@ -824,7 +836,7 @@ __gen6_read(64)

 #define __gen2_write(x) \
 static void \
-gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
     GEN2_WRITE_HEADER; \
     __raw_i915_write##x(dev_priv, reg, val); \
     GEN2_WRITE_FOOTER; \
@@ -832,7 +844,7 @@ gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \

 #define __gen5_write(x) \
 static void \
-gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
     GEN2_WRITE_HEADER; \
     ilk_dummy_write(dev_priv); \
     __raw_i915_write##x(dev_priv, reg, val); \
@@ -855,6 +867,7 @@ __gen2_write(64)
 #undef GEN2_WRITE_HEADER

 #define GEN6_WRITE_HEADER \
+    u32 offset = i915_mmio_reg_offset(reg); \
     unsigned long irqflags; \
     trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
     assert_device_not_suspended(dev_priv); \
@@ -865,10 +878,10 @@ __gen2_write(64)

 #define __gen6_write(x) \
 static void \
-gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
     u32 __fifo_ret = 0; \
     GEN6_WRITE_HEADER; \
-    if (NEEDS_FORCE_WAKE(reg)) { \
+    if (NEEDS_FORCE_WAKE(offset)) { \
         __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
     } \
     __raw_i915_write##x(dev_priv, reg, val); \
@@ -880,10 +893,10 @@ gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \

 #define __hsw_write(x) \
 static void \
-hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
     u32 __fifo_ret = 0; \
     GEN6_WRITE_HEADER; \
-    if (NEEDS_FORCE_WAKE(reg)) { \
+    if (NEEDS_FORCE_WAKE(offset)) { \
         __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
     } \
     hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
@@ -896,15 +909,7 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
     GEN6_WRITE_FOOTER; \
 }

-#define __vgpu_write(x) \
-static void vgpu_write##x(struct drm_i915_private *dev_priv, \
-                          off_t reg, u##x val, bool trace) { \
-    GEN6_WRITE_HEADER; \
-    __raw_i915_write##x(dev_priv, reg, val); \
-    GEN6_WRITE_FOOTER; \
-}
-
-static const u32 gen8_shadowed_regs[] = {
+static const i915_reg_t gen8_shadowed_regs[] = {
     FORCEWAKE_MT,
     GEN6_RPNSWREQ,
     GEN6_RC_VIDEO_FREQ,
@@ -915,11 +920,12 @@ static const u32 gen8_shadowed_regs[] = {
     /* TODO: Other registers are not yet used */
 };

-static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
+static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
+                             i915_reg_t reg)
 {
     int i;
     for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
-        if (reg == gen8_shadowed_regs[i])
+        if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i]))
             return true;

     return false;
@@ -927,10 +933,10 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)

 #define __gen8_write(x) \
 static void \
-gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
     GEN6_WRITE_HEADER; \
     hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
-    if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
+    if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
         __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
     __raw_i915_write##x(dev_priv, reg, val); \
     hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
@@ -940,22 +946,25 @@ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \

 #define __chv_write(x) \
 static void \
-chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
-    bool shadowed = is_gen8_shadowed(dev_priv, reg); \
+chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+    enum forcewake_domains fw_engine = 0; \
     GEN6_WRITE_HEADER; \
-    if (!shadowed) { \
-        if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
-            __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
-        else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
-            __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
-        else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
-            __force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
-    } \
+    if (!NEEDS_FORCE_WAKE(offset) || \
+        is_gen8_shadowed(dev_priv, reg)) \
+        fw_engine = 0; \
+    else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
+        fw_engine = FORCEWAKE_RENDER; \
+    else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
+        fw_engine = FORCEWAKE_MEDIA; \
+    else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
+        fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+    if (fw_engine) \
+        __force_wake_get(dev_priv, fw_engine); \
     __raw_i915_write##x(dev_priv, reg, val); \
     GEN6_WRITE_FOOTER; \
 }

-static const u32 gen9_shadowed_regs[] = {
+static const i915_reg_t gen9_shadowed_regs[] = {
     RING_TAIL(RENDER_RING_BASE),
     RING_TAIL(GEN6_BSD_RING_BASE),
     RING_TAIL(VEBOX_RING_BASE),
@@ -968,11 +977,12 @@ static const u32 gen9_shadowed_regs[] = {
     /* TODO: Other registers are not yet used */
 };

-static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
+static bool is_gen9_shadowed(struct drm_i915_private *dev_priv,
+                             i915_reg_t reg)
 {
     int i;
     for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
-        if (reg == gen9_shadowed_regs[i])
+        if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i]))
             return true;

     return false;
@@ -980,19 +990,19 @@ static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)

 #define __gen9_write(x) \
 static void \
-gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
+gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
               bool trace) { \
     enum forcewake_domains fw_engine; \
     GEN6_WRITE_HEADER; \
     hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
-    if (!SKL_NEEDS_FORCE_WAKE(reg) || \
+    if (!SKL_NEEDS_FORCE_WAKE(offset) || \
         is_gen9_shadowed(dev_priv, reg)) \
         fw_engine = 0; \
-    else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
+    else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
         fw_engine = FORCEWAKE_RENDER; \
-    else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
+    else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
         fw_engine = FORCEWAKE_MEDIA; \
-    else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
+    else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
         fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
     else \
         fw_engine = FORCEWAKE_BLITTER; \
@@ -1024,20 +1034,41 @@ __gen6_write(8)
 __gen6_write(16)
 __gen6_write(32)
 __gen6_write(64)
-__vgpu_write(8)
-__vgpu_write(16)
-__vgpu_write(32)
-__vgpu_write(64)

 #undef __gen9_write
 #undef __chv_write
 #undef __gen8_write
 #undef __hsw_write
 #undef __gen6_write
-#undef __vgpu_write
 #undef GEN6_WRITE_FOOTER
 #undef GEN6_WRITE_HEADER

+#define VGPU_WRITE_HEADER \
+    unsigned long irqflags; \
+    trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+    assert_device_not_suspended(dev_priv); \
+    spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define VGPU_WRITE_FOOTER \
+    spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+
+#define __vgpu_write(x) \
+static void vgpu_write##x(struct drm_i915_private *dev_priv, \
+                          i915_reg_t reg, u##x val, bool trace) { \
+    VGPU_WRITE_HEADER; \
+    __raw_i915_write##x(dev_priv, reg, val); \
+    VGPU_WRITE_FOOTER; \
+}
+
+__vgpu_write(8)
+__vgpu_write(16)
+__vgpu_write(32)
+__vgpu_write(64)
+
+#undef __vgpu_write
+#undef VGPU_WRITE_FOOTER
+#undef VGPU_WRITE_HEADER
+
 #define ASSIGN_WRITE_MMIO_VFUNCS(x) \
 do { \
     dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
@@ -1057,7 +1088,8 @@ do { \

 static void fw_domain_init(struct drm_i915_private *dev_priv,
                            enum forcewake_domain_id domain_id,
-                           u32 reg_set, u32 reg_ack)
+                           i915_reg_t reg_set,
+                           i915_reg_t reg_ack)
 {
     struct intel_uncore_forcewake_domain *d;

@@ -1087,8 +1119,6 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
         d->reg_post = FORCEWAKE_ACK_VLV;
     else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
         d->reg_post = ECOBUS;
-    else
-        d->reg_post = 0;

     d->i915 = dev_priv;
     d->id = domain_id;
@@ -1262,12 +1292,14 @@ void intel_uncore_fini(struct drm_device *dev)
 #define GEN_RANGE(l, h) GENMASK(h, l)

 static const struct register_whitelist {
-    uint64_t offset;
+    i915_reg_t offset_ldw, offset_udw;
     uint32_t size;
     /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
     uint32_t gen_bitmask;
 } whitelist[] = {
-    { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
+    { .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
+      .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
+      .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
 };

 int i915_reg_read_ioctl(struct drm_device *dev,
@@ -1277,11 +1309,11 @@ int i915_reg_read_ioctl(struct drm_device *dev,
     struct drm_i915_reg_read *reg = data;
     struct register_whitelist const *entry = whitelist;
     unsigned size;
-    u64 offset;
+    i915_reg_t offset_ldw, offset_udw;
     int i, ret = 0;

     for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
-        if (entry->offset == (reg->offset & -entry->size) &&
+        if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
             (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
             break;
     }
@@ -1293,27 +1325,28 @@ int i915_reg_read_ioctl(struct drm_device *dev,
      * be naturally aligned (and those that are not so aligned merely
      * limit the available flags for that register).
      */
-    offset = entry->offset;
+    offset_ldw = entry->offset_ldw;
+    offset_udw = entry->offset_udw;
     size = entry->size;
-    size |= reg->offset ^ offset;
+    size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);

     intel_runtime_pm_get(dev_priv);

     switch (size) {
     case 8 | 1:
-        reg->val = I915_READ64_2x32(offset, offset+4);
+        reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
         break;
     case 8:
-        reg->val = I915_READ64(offset);
+        reg->val = I915_READ64(offset_ldw);
         break;
     case 4:
-        reg->val = I915_READ(offset);
+        reg->val = I915_READ(offset_ldw);
         break;
     case 2:
-        reg->val = I915_READ16(offset);
+        reg->val = I915_READ16(offset_ldw);
         break;
     case 1:
-        reg->val = I915_READ8(offset);
+        reg->val = I915_READ8(offset_ldw);
         break;
     default:
         ret = -EINVAL;
@@ -1470,7 +1503,7 @@ static int gen6_do_reset(struct drm_device *dev)
 }

 static int wait_for_register(struct drm_i915_private *dev_priv,
-                             const u32 reg,
+                             i915_reg_t reg,
                              const u32 mask,
                              const u32 value,
                              const unsigned long timeout_ms)
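The read/write macros above all follow one pattern: translate the typed register to a raw offset, map that offset to the forcewake domains that must be held, grab them, then do the raw access. A standalone sketch of just the offset-to-domain step (the range boundaries below are illustrative placeholders, not the hardware's actual ranges):

#include <stdio.h>

enum fw_domains {
    FW_NONE    = 0,
    FW_RENDER  = 1 << 0,
    FW_MEDIA   = 1 << 1,
    FW_BLITTER = 1 << 2,
};

/* REG_RANGE() equivalent: half-open interval check on the raw offset. */
static int in_range(unsigned int reg, unsigned int start, unsigned int end)
{
    return reg >= start && reg < end;
}

static enum fw_domains fw_domains_for_reg(unsigned int offset)
{
    if (offset >= 0x40000)                  /* NEEDS_FORCE_WAKE() cutoff */
        return FW_NONE;
    if (in_range(offset, 0x2000, 0x4000))   /* "render" range (made up) */
        return FW_RENDER;
    if (in_range(offset, 0x12000, 0x14000)) /* "media" range (made up) */
        return FW_MEDIA;
    if (in_range(offset, 0x9400, 0x9800))   /* "common" range (made up) */
        return FW_RENDER | FW_MEDIA;
    return FW_BLITTER;                      /* gen9-style fallback domain */
}

int main(void)
{
    printf("0x2358  -> domains 0x%x\n", fw_domains_for_reg(0x2358));
    printf("0x45010 -> domains 0x%x\n", fw_domains_for_reg(0x45010));
    return 0;
}

The i915_reg_t conversion in the hunks above exists precisely so this offset extraction happens once, in the header macro, instead of a raw u32 flowing through every helper.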
drivers/pci/quirks.c:

@@ -3405,7 +3405,9 @@ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
     return 0;
 }

+#include "../gpu/drm/i915/i915_reg.h"
 #define SOUTH_CHICKEN2 0xc2004
 #define PCH_PP_STATUS 0xc7200
 #define PCH_PP_CONTROL 0xc7204
 #define MSG_CTL 0x45010
 #define NSDE_PWR_STATE 0xd0100
 #define IGD_OPERATION_TIMEOUT 10000 /* set timeout 10 seconds */
include/drm/i915_component.h:

@@ -31,47 +31,80 @@
 #define MAX_PORTS 5

 /**
- * struct i915_audio_component_ops - callbacks defined in gfx driver
- * @owner: the module owner
- * @get_power: get the POWER_DOMAIN_AUDIO power well
- * @put_power: put the POWER_DOMAIN_AUDIO power well
- * @codec_wake_override: Enable/Disable generating the codec wake signal
- * @get_cdclk_freq: get the Core Display Clock in KHz
- * @sync_audio_rate: set n/cts based on the sample rate
+ * struct i915_audio_component_ops - Ops implemented by i915 driver, called by hda driver
  */
 struct i915_audio_component_ops {
+    /**
+     * @owner: i915 module
+     */
     struct module *owner;
+    /**
+     * @get_power: get the POWER_DOMAIN_AUDIO power well
+     *
+     * Request the power well to be turned on.
+     */
     void (*get_power)(struct device *);
+    /**
+     * @put_power: put the POWER_DOMAIN_AUDIO power well
+     *
+     * Allow the power well to be turned off.
+     */
     void (*put_power)(struct device *);
+    /**
+     * @codec_wake_override: Enable/disable codec wake signal
+     */
     void (*codec_wake_override)(struct device *, bool enable);
+    /**
+     * @get_cdclk_freq: Get the Core Display Clock in kHz
+     */
     int (*get_cdclk_freq)(struct device *);
+    /**
+     * @sync_audio_rate: set n/cts based on the sample rate
+     *
+     * Called from audio driver. After audio driver sets the
+     * sample rate, it will call this function to set n/cts
+     */
     int (*sync_audio_rate)(struct device *, int port, int rate);
 };

+/**
+ * struct i915_audio_component_audio_ops - Ops implemented by hda driver, called by i915 driver
+ */
 struct i915_audio_component_audio_ops {
+    /**
+     * @audio_ptr: Pointer to be used in call to pin_eld_notify
+     */
     void *audio_ptr;
     /**
-     * Call from i915 driver, notifying the HDA driver that
-     * pin sense and/or ELD information has changed.
-     * @audio_ptr: HDA driver object
-     * @port: Which port has changed (PORTA / PORTB / PORTC etc)
+     * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed
+     *
+     * Called when the i915 driver has set up audio pipeline or has just
+     * begun to tear it down. This allows the HDA driver to update its
+     * status accordingly (even when the HDA controller is in power save
+     * mode).
      */
     void (*pin_eld_notify)(void *audio_ptr, int port);
 };

 /**
- * struct i915_audio_component - used for audio video interaction
- * @dev: the device from gfx driver
- * @aud_sample_rate: the array of audio sample rate per port
- * @ops: callback for audio driver calling
- * @audio_ops: Call from i915 driver
+ * struct i915_audio_component - Used for direct communication between i915 and hda drivers
  */
 struct i915_audio_component {
+    /**
+     * @dev: i915 device, used as parameter for ops
+     */
     struct device *dev;
+    /**
+     * @aud_sample_rate: the array of audio sample rate per port
+     */
     int aud_sample_rate[MAX_PORTS];

+    /**
+     * @ops: Ops implemented by i915 driver, called by hda driver
+     */
     const struct i915_audio_component_ops *ops;

+    /**
+     * @audio_ops: Ops implemented by hda driver, called by i915 driver
+     */
     const struct i915_audio_component_audio_ops *audio_ops;
 };
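The restructured kerneldoc spells out the contract: i915 implements i915_audio_component_ops for the HDA driver to call, the HDA driver implements i915_audio_component_audio_ops for i915 to call, and the shared i915_audio_component holds both tables. A hedged user-space model of that double-dispatch wiring (every name below is a stand-in, not the kernel structs):

#include <stdio.h>

struct gfx_ops {                 /* stands in for i915_audio_component_ops */
    int (*get_cdclk_freq)(void *gfx);
};

struct audio_ops {               /* stands in for ..._audio_ops */
    void *audio_ptr;
    void (*pin_eld_notify)(void *audio_ptr, int port);
};

struct component {               /* stands in for i915_audio_component */
    void *gfx;
    const struct gfx_ops *ops;
    const struct audio_ops *audio_ops;
};

static int fake_get_cdclk(void *gfx) { (void)gfx; return 540000; /* kHz */ }

static void fake_eld_notify(void *audio_ptr, int port)
{
    printf("hda(%p): ELD changed on port %d\n", audio_ptr, port);
}

int main(void)
{
    static const struct gfx_ops gops = { .get_cdclk_freq = fake_get_cdclk };
    static int hda_object;
    static const struct audio_ops aops = {
        .audio_ptr = &hda_object,
        .pin_eld_notify = fake_eld_notify,
    };
    struct component comp = { .ops = &gops, .audio_ops = &aops };

    /* hda -> i915 direction: query the display clock */
    printf("cdclk: %d kHz\n", comp.ops->get_cdclk_freq(comp.gfx));
    /* i915 -> hda direction: report an ELD/pin-sense change */
    comp.audio_ops->pin_eld_notify(comp.audio_ops->audio_ptr, 1);
    return 0;
}

Keeping both directions behind ops tables is what lets either module load, unload, or power-gate independently of the other.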
include/drm/i915_pciids.h:

@@ -291,4 +291,40 @@
     INTEL_VGA_DEVICE(0x1A84, info), \
     INTEL_VGA_DEVICE(0x5A84, info)

+#define INTEL_KBL_GT1_IDS(info) \
+    INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \
+    INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \
+    INTEL_VGA_DEVICE(0x5917, info), /* DT GT1.5 */ \
+    INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
+    INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
+    INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
+    INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
+    INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
+
+#define INTEL_KBL_GT2_IDS(info) \
+    INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
+    INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \
+    INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \
+    INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
+    INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \
+    INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \
+    INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
+
+#define INTEL_KBL_GT3_IDS(info) \
+    INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \
+    INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \
+    INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */
+
+#define INTEL_KBL_GT4_IDS(info) \
+    INTEL_VGA_DEVICE(0x5932, info), /* DT GT4 */ \
+    INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \
+    INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \
+    INTEL_VGA_DEVICE(0x593D, info) /* WKS GT4 */
+
+#define INTEL_KBL_IDS(info) \
+    INTEL_KBL_GT1_IDS(info), \
+    INTEL_KBL_GT2_IDS(info), \
+    INTEL_KBL_GT3_IDS(info), \
+    INTEL_KBL_GT4_IDS(info)
+
 #endif /* _I915_PCIIDS_H */
include/uapi/drm/i915_drm.h:

@@ -1079,6 +1079,12 @@ struct drm_i915_gem_context_destroy {
 };

 struct drm_i915_reg_read {
+    /*
+     * Register offset.
+     * For 64bit wide registers where the upper 32bits don't immediately
+     * follow the lower 32bits, the offset of the lower 32bits must
+     * be specified
+     */
     __u64 offset;
     __u64 val; /* Return value */
 };
@@ -1125,8 +1131,9 @@ struct drm_i915_gem_context_param {
     __u32 ctx_id;
     __u32 size;
     __u64 param;
 #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
 #define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
+#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
     __u64 value;
 };
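The new comment documents how drm_i915_reg_read is driven from user space: fill in offset, issue the REG_READ ioctl, read back val; for split 64-bit registers only the lower-dword offset is passed. A minimal usage sketch under those assumptions (the device node path and the uapi header location vary per system, and error handling is deliberately thin):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>

int main(void)
{
    struct drm_i915_reg_read reg;
    int fd = open("/dev/dri/card0", O_RDWR);

    if (fd < 0) {
        perror("open");
        return 1;
    }

    memset(&reg, 0, sizeof(reg));
    reg.offset = 0x2358;    /* RING_TIMESTAMP(RENDER_RING_BASE), the whitelisted register */

    if (ioctl(fd, DRM_IOCTL_I915_REG_READ, &reg) == 0)
        printf("render ring timestamp: 0x%llx\n",
               (unsigned long long)reg.val);
    else
        perror("DRM_IOCTL_I915_REG_READ");

    close(fd);
    return 0;
}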
kernel/async.c:

@@ -326,3 +326,4 @@ bool current_is_async(void)

     return worker && worker->current_func == async_run_entry_fn;
 }
+EXPORT_SYMBOL_GPL(current_is_async);