Merge tag 'drm-intel-next-2015-05-08' of git://anongit.freedesktop.org/drm-intel into drm-next
- skl plane scaler support (Chandra Kondru)
- enable hsw cmd parser (Daniel and fix from Rebecca Palmer)
- skl dc5/6 support (low power display modes) from Suketu&Sunil
- dp compliance testing patches (Todd Previte)
- dp link training optimization (Mika Kahola)
- fixes to make skl resume work (Damien)
- rework modeset code to fully use atomic state objects (Ander&Maarten)
- pile of bxt w/a patchs from Nick Hoath
- (linear) partial gtt mmap support (Joonas Lahtinen)

* tag 'drm-intel-next-2015-05-08' of git://anongit.freedesktop.org/drm-intel: (103 commits)
  drm/i915: Update DRIVER_DATE to 20150508
  drm/i915: Only wait for required lanes in vlv_wait_port_ready()
  drm/i915: Fix possible security hole in command parsing
  drm/edid: Kerneldoc for newly added edid_corrupt
  drm/i915: Reject huge tiled objects
  Revert "drm/i915: Hack to tie both common lanes together on chv"
  drm/i915: Work around DISPLAY_PHY_CONTROL register corruption on CHV
  drm/i915: Implement chv display PHY lane stagger setup
  drm/i915/vlv: remove wait for previous GFX clk disable request
  drm/i915: Set crtc_state->active to false when CRTC is disabled (v2)
  drm/i915/skl: Re-indent part of skl_ddi_calculate_wrpll()
  drm/i915: Use partial view in mmap fault handler
  drm/i915: Add a partial GGTT view type
  drm/i915: Consider object pinned if any VMA is pinned
  drm/i915: Do not make assumptions on GGTT VMA sizes
  drm/i915/bxt: Mark WaCcsTlbPrefetchDisable as for Broxton also.
  drm/i915/bxt: Mark WaDisablePartialResolveInVc as for Broxton also.
  drm/i915/bxt: Mark Wa4x4STCOptimizationDisable as for Broxton also.
  drm/i915/bxt: Move WaForceEnableNonCoherent to Skylake only
  drm/i915/bxt: Enable WaEnableYV12BugFixInHalfSliceChicken7 for Broxton
  ...
commit d0093404f8
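Editor's hedged sketch (not part of the merge): the drm_edid.c hunks below add a fourth `bool *edid_corrupt` out-parameter to drm_edid_block_valid(); callers pass either &connector->edid_corrupt or NULL. The self-contained C sketch below only models that call pattern; the struct and the validator body are stand-ins, not the kernel implementation.

/* Stand-alone sketch of the edid_corrupt out-parameter pattern. */
#include <stdbool.h>
#include <stdio.h>

struct fake_connector {
	bool edid_corrupt;   /* set when header/checksum validation fails */
};

/* Stand-in for the new 4-argument drm_edid_block_valid(): reports
 * corruption through the optional out-parameter. */
static bool block_valid(const unsigned char *raw, int block,
			bool print_bad, bool *edid_corrupt)
{
	bool header_ok = raw && raw[0] == 0x00;   /* placeholder check only */

	if (edid_corrupt)
		*edid_corrupt = !header_ok;
	if (!header_ok && print_bad)
		fprintf(stderr, "bad EDID block %d\n", block);
	return header_ok;
}

int main(void)
{
	struct fake_connector connector = { .edid_corrupt = false };
	unsigned char base_block[128] = { 0x00 };

	/* Base block: record corruption on the connector, as
	 * drm_do_get_edid() does with &connector->edid_corrupt below. */
	if (!block_valid(base_block, 0, true, &connector.edid_corrupt))
		return 1;

	/* Extension blocks: callers that don't care pass NULL, as
	 * drm_edid_is_valid() does in the diff. */
	(void)block_valid(base_block, 1, true, NULL);

	printf("edid_corrupt=%d\n", connector.edid_corrupt);
	return 0;
}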
@@ -280,6 +280,8 @@ mode_fixup(struct drm_atomic_state *state)
*/
encoder = conn_state->best_encoder;
funcs = encoder->helper_private;
if (!funcs)
continue;

if (encoder->bridge && encoder->bridge->funcs->mode_fixup) {
ret = encoder->bridge->funcs->mode_fixup(
@@ -317,6 +319,9 @@ mode_fixup(struct drm_atomic_state *state)
continue;

funcs = crtc->helper_private;
if (!funcs->mode_fixup)
continue;

ret = funcs->mode_fixup(crtc, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
@@ -1041,13 +1041,15 @@ static bool drm_edid_is_zero(const u8 *in_edid, int length)
* @raw_edid: pointer to raw EDID block
* @block: type of block to validate (0 for base, extension otherwise)
* @print_bad_edid: if true, dump bad EDID blocks to the console
* @edid_corrupt: if true, the header or checksum is invalid
*
* Validate a base or extension EDID block and optionally dump bad blocks to
* the console.
*
* Return: True if the block is valid, false otherwise.
*/
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
bool *edid_corrupt)
{
u8 csum;
struct edid *edid = (struct edid *)raw_edid;
@@ -1060,11 +1062,22 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)

if (block == 0) {
int score = drm_edid_header_is_valid(raw_edid);
if (score == 8) ;
else if (score >= edid_fixup) {
if (score == 8) {
if (edid_corrupt)
*edid_corrupt = false;
} else if (score >= edid_fixup) {
/* Displayport Link CTS Core 1.2 rev1.1 test 4.2.2.6
* The corrupt flag needs to be set here otherwise, the
* fix-up code here will correct the problem, the
* checksum is correct and the test fails
*/
if (edid_corrupt)
*edid_corrupt = true;
DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
memcpy(raw_edid, edid_header, sizeof(edid_header));
} else {
if (edid_corrupt)
*edid_corrupt = true;
goto bad;
}
}
@@ -1075,6 +1088,9 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
}

if (edid_corrupt)
*edid_corrupt = true;

/* allow CEA to slide through, switches mangle this */
if (raw_edid[0] != 0x02)
goto bad;
@@ -1129,7 +1145,7 @@ bool drm_edid_is_valid(struct edid *edid)
return false;

for (i = 0; i <= edid->extensions; i++)
if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true, NULL))
return false;

return true;
@@ -1232,7 +1248,8 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
for (i = 0; i < 4; i++) {
if (get_edid_block(data, block, 0, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block, 0, print_bad_edid))
if (drm_edid_block_valid(block, 0, print_bad_edid,
&connector->edid_corrupt))
break;
if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
connector->null_edid_counter++;
@@ -1257,7 +1274,10 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) {
if (drm_edid_block_valid(block + (valid_extensions + 1)
* EDID_LENGTH, j,
print_bad_edid,
NULL)) {
valid_extensions++;
break;
}
@@ -216,7 +216,8 @@ static void *edid_load(struct drm_connector *connector, const char *name,
goto out;
}

if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
if (!drm_edid_block_valid(edid, 0, print_bad_edid,
&connector->edid_corrupt)) {
connector->bad_edid_counter++;
DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
name);
@@ -229,7 +230,9 @@ static void *edid_load(struct drm_connector *connector, const char *name,
if (i != valid_extensions + 1)
memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
edid + i * EDID_LENGTH, EDID_LENGTH);
if (drm_edid_block_valid(edid + i * EDID_LENGTH, i, print_bad_edid))
if (drm_edid_block_valid(edid + i * EDID_LENGTH, i,
print_bad_edid,
NULL))
valid_extensions++;
}
@@ -12,7 +12,8 @@ i915-y := i915_drv.o \
i915_suspend.o \
i915_sysfs.o \
intel_pm.o \
intel_runtime_pm.o
intel_runtime_pm.o \
intel_csr.o

i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
@ -1211,12 +1211,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
|
||||
GEN6_CURBSYTAVG_MASK);
|
||||
seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
|
||||
GEN6_CURBSYTAVG_MASK);
|
||||
seq_printf(m, "Up threshold: %d%%\n",
|
||||
dev_priv->rps.up_threshold);
|
||||
|
||||
seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
|
||||
GEN6_CURIAVG_MASK);
|
||||
seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
|
||||
GEN6_CURBSYTAVG_MASK);
|
||||
seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
|
||||
GEN6_CURBSYTAVG_MASK);
|
||||
seq_printf(m, "Down threshold: %d%%\n",
|
||||
dev_priv->rps.down_threshold);
|
||||
|
||||
max_freq = (rp_state_cap & 0xff0000) >> 16;
|
||||
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
|
||||
@ -1232,12 +1237,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
|
||||
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
|
||||
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
|
||||
intel_gpu_freq(dev_priv, max_freq));
|
||||
|
||||
seq_printf(m, "Max overclocked frequency: %dMHz\n",
|
||||
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
|
||||
|
||||
seq_printf(m, "Current freq: %d MHz\n",
|
||||
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
|
||||
seq_printf(m, "Actual freq: %d MHz\n", cagf);
|
||||
seq_printf(m, "Idle freq: %d MHz\n",
|
||||
intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
|
||||
seq_printf(m, "Min freq: %d MHz\n",
|
||||
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
|
||||
seq_printf(m, "Max freq: %d MHz\n",
|
||||
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
|
||||
seq_printf(m,
|
||||
"efficient (RPe) frequency: %d MHz\n",
|
||||
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
|
||||
} else if (IS_VALLEYVIEW(dev)) {
|
||||
u32 freq_sts;
|
||||
|
||||
@ -1246,6 +1260,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
|
||||
seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
|
||||
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
|
||||
|
||||
seq_printf(m, "actual GPU freq: %d MHz\n",
|
||||
intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
|
||||
|
||||
seq_printf(m, "current GPU freq: %d MHz\n",
|
||||
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
|
||||
|
||||
seq_printf(m, "max GPU freq: %d MHz\n",
|
||||
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
|
||||
|
||||
@ -1258,9 +1278,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
|
||||
seq_printf(m,
|
||||
"efficient (RPe) frequency: %d MHz\n",
|
||||
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
|
||||
|
||||
seq_printf(m, "current GPU freq: %d MHz\n",
|
||||
intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
} else {
|
||||
seq_puts(m, "no P-state info available\n");
|
||||
@ -3594,8 +3611,7 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
|
||||
intel_display_power_get(dev_priv,
|
||||
POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
|
||||
|
||||
dev_priv->display.crtc_disable(&crtc->base);
|
||||
dev_priv->display.crtc_enable(&crtc->base);
|
||||
intel_crtc_reset(crtc);
|
||||
}
|
||||
drm_modeset_unlock_all(dev);
|
||||
}
|
||||
@ -3616,8 +3632,7 @@ static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
|
||||
if (crtc->config->pch_pfit.force_thru) {
|
||||
crtc->config->pch_pfit.force_thru = false;
|
||||
|
||||
dev_priv->display.crtc_disable(&crtc->base);
|
||||
dev_priv->display.crtc_enable(&crtc->base);
|
||||
intel_crtc_reset(crtc);
|
||||
|
||||
intel_display_power_put(dev_priv,
|
||||
POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
|
||||
@ -3934,6 +3949,212 @@ static const struct file_operations i915_display_crc_ctl_fops = {
|
||||
.write = display_crc_ctl_write
|
||||
};
|
||||
|
||||
static ssize_t i915_displayport_test_active_write(struct file *file,
|
||||
const char __user *ubuf,
|
||||
size_t len, loff_t *offp)
|
||||
{
|
||||
char *input_buffer;
|
||||
int status = 0;
|
||||
struct seq_file *m;
|
||||
struct drm_device *dev;
|
||||
struct drm_connector *connector;
|
||||
struct list_head *connector_list;
|
||||
struct intel_dp *intel_dp;
|
||||
int val = 0;
|
||||
|
||||
m = file->private_data;
|
||||
if (!m) {
|
||||
status = -ENODEV;
|
||||
return status;
|
||||
}
|
||||
dev = m->private;
|
||||
|
||||
if (!dev) {
|
||||
status = -ENODEV;
|
||||
return status;
|
||||
}
|
||||
connector_list = &dev->mode_config.connector_list;
|
||||
|
||||
if (len == 0)
|
||||
return 0;
|
||||
|
||||
input_buffer = kmalloc(len + 1, GFP_KERNEL);
|
||||
if (!input_buffer)
|
||||
return -ENOMEM;
|
||||
|
||||
if (copy_from_user(input_buffer, ubuf, len)) {
|
||||
status = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
input_buffer[len] = '\0';
|
||||
DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
|
||||
|
||||
list_for_each_entry(connector, connector_list, head) {
|
||||
|
||||
if (connector->connector_type !=
|
||||
DRM_MODE_CONNECTOR_DisplayPort)
|
||||
continue;
|
||||
|
||||
if (connector->connector_type ==
|
||||
DRM_MODE_CONNECTOR_DisplayPort &&
|
||||
connector->status == connector_status_connected &&
|
||||
connector->encoder != NULL) {
|
||||
intel_dp = enc_to_intel_dp(connector->encoder);
|
||||
status = kstrtoint(input_buffer, 10, &val);
|
||||
if (status < 0)
|
||||
goto out;
|
||||
DRM_DEBUG_DRIVER("Got %d for test active\n", val);
|
||||
/* To prevent erroneous activation of the compliance
|
||||
* testing code, only accept an actual value of 1 here
|
||||
*/
|
||||
if (val == 1)
|
||||
intel_dp->compliance_test_active = 1;
|
||||
else
|
||||
intel_dp->compliance_test_active = 0;
|
||||
}
|
||||
}
|
||||
out:
|
||||
kfree(input_buffer);
|
||||
if (status < 0)
|
||||
return status;
|
||||
|
||||
*offp += len;
|
||||
return len;
|
||||
}
|
||||
|
||||
static int i915_displayport_test_active_show(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_device *dev = m->private;
|
||||
struct drm_connector *connector;
|
||||
struct list_head *connector_list = &dev->mode_config.connector_list;
|
||||
struct intel_dp *intel_dp;
|
||||
|
||||
if (!dev)
|
||||
return -ENODEV;
|
||||
|
||||
list_for_each_entry(connector, connector_list, head) {
|
||||
|
||||
if (connector->connector_type !=
|
||||
DRM_MODE_CONNECTOR_DisplayPort)
|
||||
continue;
|
||||
|
||||
if (connector->status == connector_status_connected &&
|
||||
connector->encoder != NULL) {
|
||||
intel_dp = enc_to_intel_dp(connector->encoder);
|
||||
if (intel_dp->compliance_test_active)
|
||||
seq_puts(m, "1");
|
||||
else
|
||||
seq_puts(m, "0");
|
||||
} else
|
||||
seq_puts(m, "0");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i915_displayport_test_active_open(struct inode *inode,
|
||||
struct file *file)
|
||||
{
|
||||
struct drm_device *dev = inode->i_private;
|
||||
|
||||
return single_open(file, i915_displayport_test_active_show, dev);
|
||||
}
|
||||
|
||||
static const struct file_operations i915_displayport_test_active_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = i915_displayport_test_active_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
.write = i915_displayport_test_active_write
|
||||
};
|
||||
|
||||
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_device *dev = m->private;
|
||||
struct drm_connector *connector;
|
||||
struct list_head *connector_list = &dev->mode_config.connector_list;
|
||||
struct intel_dp *intel_dp;
|
||||
|
||||
if (!dev)
|
||||
return -ENODEV;
|
||||
|
||||
list_for_each_entry(connector, connector_list, head) {
|
||||
|
||||
if (connector->connector_type !=
|
||||
DRM_MODE_CONNECTOR_DisplayPort)
|
||||
continue;
|
||||
|
||||
if (connector->status == connector_status_connected &&
|
||||
connector->encoder != NULL) {
|
||||
intel_dp = enc_to_intel_dp(connector->encoder);
|
||||
seq_printf(m, "%lx", intel_dp->compliance_test_data);
|
||||
} else
|
||||
seq_puts(m, "0");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
static int i915_displayport_test_data_open(struct inode *inode,
|
||||
struct file *file)
|
||||
{
|
||||
struct drm_device *dev = inode->i_private;
|
||||
|
||||
return single_open(file, i915_displayport_test_data_show, dev);
|
||||
}
|
||||
|
||||
static const struct file_operations i915_displayport_test_data_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = i915_displayport_test_data_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release
|
||||
};
|
||||
|
||||
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_device *dev = m->private;
|
||||
struct drm_connector *connector;
|
||||
struct list_head *connector_list = &dev->mode_config.connector_list;
|
||||
struct intel_dp *intel_dp;
|
||||
|
||||
if (!dev)
|
||||
return -ENODEV;
|
||||
|
||||
list_for_each_entry(connector, connector_list, head) {
|
||||
|
||||
if (connector->connector_type !=
|
||||
DRM_MODE_CONNECTOR_DisplayPort)
|
||||
continue;
|
||||
|
||||
if (connector->status == connector_status_connected &&
|
||||
connector->encoder != NULL) {
|
||||
intel_dp = enc_to_intel_dp(connector->encoder);
|
||||
seq_printf(m, "%02lx", intel_dp->compliance_test_type);
|
||||
} else
|
||||
seq_puts(m, "0");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i915_displayport_test_type_open(struct inode *inode,
|
||||
struct file *file)
|
||||
{
|
||||
struct drm_device *dev = inode->i_private;
|
||||
|
||||
return single_open(file, i915_displayport_test_type_show, dev);
|
||||
}
|
||||
|
||||
static const struct file_operations i915_displayport_test_type_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = i915_displayport_test_type_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release
|
||||
};
|
||||
|
||||
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
|
||||
{
|
||||
struct drm_device *dev = m->private;
|
||||
@ -4829,6 +5050,9 @@ static const struct i915_debugfs_files {
|
||||
{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
|
||||
{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
|
||||
{"i915_fbc_false_color", &i915_fbc_fc_fops},
|
||||
{"i915_dp_test_data", &i915_displayport_test_data_fops},
|
||||
{"i915_dp_test_type", &i915_displayport_test_type_fops},
|
||||
{"i915_dp_test_active", &i915_displayport_test_active_fops}
|
||||
};
|
||||
|
||||
void intel_display_crc_init(struct drm_device *dev)
|
||||
|
@ -816,6 +816,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
|
||||
spin_lock_init(&dev_priv->mmio_flip_lock);
|
||||
mutex_init(&dev_priv->dpio_lock);
|
||||
mutex_init(&dev_priv->modeset_restore_lock);
|
||||
mutex_init(&dev_priv->csr_lock);
|
||||
|
||||
intel_pm_setup(dev);
|
||||
|
||||
@ -861,9 +862,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
|
||||
|
||||
intel_uncore_init(dev);
|
||||
|
||||
/* Load CSR Firmware for SKL */
|
||||
intel_csr_ucode_init(dev);
|
||||
|
||||
ret = i915_gem_gtt_init(dev);
|
||||
if (ret)
|
||||
goto out_regs;
|
||||
goto out_freecsr;
|
||||
|
||||
/* WARNING: Apparently we must kick fbdev drivers before vgacon,
|
||||
* otherwise the vga fbdev driver falls over. */
|
||||
@ -1033,7 +1037,8 @@ out_mtrrfree:
|
||||
io_mapping_free(dev_priv->gtt.mappable);
|
||||
out_gtt:
|
||||
i915_global_gtt_cleanup(dev);
|
||||
out_regs:
|
||||
out_freecsr:
|
||||
intel_csr_ucode_fini(dev);
|
||||
intel_uncore_fini(dev);
|
||||
pci_iounmap(dev->pdev, dev_priv->regs);
|
||||
put_bridge:
|
||||
@ -1113,6 +1118,8 @@ int i915_driver_unload(struct drm_device *dev)
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
i915_gem_cleanup_stolen(dev);
|
||||
|
||||
intel_csr_ucode_fini(dev);
|
||||
|
||||
intel_teardown_gmbus(dev);
|
||||
intel_teardown_mchbar(dev);
|
||||
|
||||
|
@ -556,6 +556,26 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
|
||||
cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
|
||||
}
|
||||
|
||||
void i915_firmware_load_error_print(const char *fw_path, int err)
|
||||
{
|
||||
DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
|
||||
|
||||
/*
|
||||
* If the reason is not known assume -ENOENT since that's the most
|
||||
* usual failure mode.
|
||||
*/
|
||||
if (!err)
|
||||
err = -ENOENT;
|
||||
|
||||
if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
|
||||
return;
|
||||
|
||||
DRM_ERROR(
|
||||
"The driver is built-in, so to load the firmware you need to\n"
|
||||
"include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
|
||||
"in your initrd/initramfs image.\n");
|
||||
}
|
||||
|
||||
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
@ -574,6 +594,8 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
|
||||
static int intel_suspend_complete(struct drm_i915_private *dev_priv);
|
||||
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
|
||||
bool rpm_resume);
|
||||
static int skl_resume_prepare(struct drm_i915_private *dev_priv);
|
||||
|
||||
|
||||
static int i915_drm_suspend(struct drm_device *dev)
|
||||
{
|
||||
@ -788,6 +810,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
|
||||
|
||||
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
|
||||
hsw_disable_pc8(dev_priv);
|
||||
else if (IS_SKYLAKE(dev_priv))
|
||||
ret = skl_resume_prepare(dev_priv);
|
||||
|
||||
intel_uncore_sanitize(dev);
|
||||
intel_power_domains_init_hw(dev_priv);
|
||||
@ -1002,6 +1026,19 @@ static int i915_pm_resume(struct device *dev)
|
||||
return i915_drm_resume(drm_dev);
|
||||
}
|
||||
|
||||
static int skl_suspend_complete(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
/* Enabling DC6 is not a hard requirement to enter runtime D3 */
|
||||
|
||||
/*
|
||||
* This is to ensure that CSR isn't identified as loaded before
|
||||
* CSR-loading program is called during runtime-resume.
|
||||
*/
|
||||
intel_csr_load_status_set(dev_priv, FW_UNINITIALIZED);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
hsw_enable_pc8(dev_priv);
|
||||
@ -1041,6 +1078,15 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int skl_resume_prepare(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
|
||||
intel_csr_load_program(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Save all Gunit registers that may be lost after a D3 and a subsequent
|
||||
* S0i[R123] transition. The list of registers needing a save/restore is
|
||||
@ -1502,6 +1548,8 @@ static int intel_runtime_resume(struct device *device)
|
||||
|
||||
if (IS_BROXTON(dev))
|
||||
ret = bxt_resume_prepare(dev_priv);
|
||||
else if (IS_SKYLAKE(dev))
|
||||
ret = skl_resume_prepare(dev_priv);
|
||||
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
|
||||
hsw_disable_pc8(dev_priv);
|
||||
else if (IS_VALLEYVIEW(dev_priv))
|
||||
@ -1536,6 +1584,8 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv)
|
||||
|
||||
if (IS_BROXTON(dev))
|
||||
ret = bxt_suspend_complete(dev_priv);
|
||||
else if (IS_SKYLAKE(dev))
|
||||
ret = skl_suspend_complete(dev_priv);
|
||||
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
|
||||
ret = hsw_suspend_complete(dev_priv);
|
||||
else if (IS_VALLEYVIEW(dev))
|
||||
|
@ -56,7 +56,7 @@
|
||||
|
||||
#define DRIVER_NAME "i915"
|
||||
#define DRIVER_DESC "Intel Graphics"
|
||||
#define DRIVER_DATE "20150423"
|
||||
#define DRIVER_DATE "20150508"
|
||||
|
||||
#undef WARN_ON
|
||||
/* Many gcc seem to no see through this and fall over :( */
|
||||
@ -238,6 +238,11 @@ enum hpd_pin {
|
||||
#define for_each_crtc(dev, crtc) \
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
|
||||
|
||||
#define for_each_intel_plane(dev, intel_plane) \
|
||||
list_for_each_entry(intel_plane, \
|
||||
&dev->mode_config.plane_list, \
|
||||
base.head)
|
||||
|
||||
#define for_each_intel_crtc(dev, intel_crtc) \
|
||||
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
|
||||
|
||||
@ -295,7 +300,7 @@ struct intel_dpll_hw_state {
|
||||
/* skl */
|
||||
/*
|
||||
* DPLL_CTRL1 has 6 bits for each each this DPLL. We store those in
|
||||
* lower part of crtl1 and they get shifted into position when writing
|
||||
* lower part of ctrl1 and they get shifted into position when writing
|
||||
* the register. This allows us to easily compare the state to share
|
||||
* the DPLL.
|
||||
*/
|
||||
@ -669,6 +674,22 @@ struct intel_uncore {
|
||||
#define for_each_fw_domain(domain__, dev_priv__, i__) \
|
||||
for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
|
||||
|
||||
enum csr_state {
|
||||
FW_UNINITIALIZED = 0,
|
||||
FW_LOADED,
|
||||
FW_FAILED
|
||||
};
|
||||
|
||||
struct intel_csr {
|
||||
const char *fw_path;
|
||||
__be32 *dmc_payload;
|
||||
uint32_t dmc_fw_size;
|
||||
uint32_t mmio_count;
|
||||
uint32_t mmioaddr[8];
|
||||
uint32_t mmiodata[8];
|
||||
enum csr_state state;
|
||||
};
|
||||
|
||||
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
|
||||
func(is_mobile) sep \
|
||||
func(is_i85x) sep \
|
||||
@ -1348,7 +1369,6 @@ struct intel_vbt_data {
|
||||
bool edp_initialized;
|
||||
bool edp_support;
|
||||
int edp_bpp;
|
||||
bool edp_low_vswing;
|
||||
struct edp_power_seq edp_pps;
|
||||
|
||||
struct {
|
||||
@ -1574,6 +1594,11 @@ struct drm_i915_private {
|
||||
|
||||
struct i915_virtual_gpu vgpu;
|
||||
|
||||
struct intel_csr csr;
|
||||
|
||||
/* Display CSR-related protection */
|
||||
struct mutex csr_lock;
|
||||
|
||||
struct intel_gmbus gmbus[GMBUS_NUM_PINS];
|
||||
|
||||
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
|
||||
@ -1757,6 +1782,8 @@ struct drm_i915_private {
|
||||
|
||||
u32 fdi_rx_config;
|
||||
|
||||
u32 chv_phy_control;
|
||||
|
||||
u32 suspend_count;
|
||||
struct i915_suspend_saved_registers regfile;
|
||||
struct vlv_s0ix_state vlv_s0ix_state;
|
||||
@ -1825,6 +1852,8 @@ struct drm_i915_private {
|
||||
void (*stop_ring)(struct intel_engine_cs *ring);
|
||||
} gt;
|
||||
|
||||
bool edp_low_vswing;
|
||||
|
||||
/*
|
||||
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
|
||||
* will be rejected. Instead look for a better place.
|
||||
@ -2422,10 +2451,13 @@ struct drm_i915_cmd_table {
|
||||
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
|
||||
IS_SKYLAKE(dev))
|
||||
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
|
||||
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
|
||||
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
|
||||
IS_SKYLAKE(dev))
|
||||
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
|
||||
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
|
||||
|
||||
#define HAS_CSR(dev) (IS_SKYLAKE(dev))
|
||||
|
||||
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
|
||||
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
|
||||
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
|
||||
@ -2491,6 +2523,7 @@ struct i915_params {
|
||||
int mmio_debug;
|
||||
bool verbose_state_checks;
|
||||
bool nuclear_pageflip;
|
||||
int edp_vswing;
|
||||
};
|
||||
extern struct i915_params i915 __read_mostly;
|
||||
|
||||
@ -2516,6 +2549,7 @@ extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
|
||||
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
|
||||
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
|
||||
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
|
||||
void i915_firmware_load_error_print(const char *fw_path, int err);
|
||||
|
||||
/* i915_irq.c */
|
||||
void i915_queue_hangcheck(struct drm_device *dev);
|
||||
|
@ -1635,6 +1635,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct i915_ggtt_view view = i915_ggtt_view_normal;
|
||||
pgoff_t page_offset;
|
||||
unsigned long pfn;
|
||||
int ret = 0;
|
||||
@ -1667,8 +1668,23 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
/* Now bind it into the GTT if needed */
|
||||
ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
|
||||
/* Use a partial view if the object is bigger than the aperture. */
|
||||
if (obj->base.size >= dev_priv->gtt.mappable_end &&
|
||||
obj->tiling_mode == I915_TILING_NONE) {
|
||||
static const unsigned int chunk_size = 256; // 1 MiB
|
||||
|
||||
memset(&view, 0, sizeof(view));
|
||||
view.type = I915_GGTT_VIEW_PARTIAL;
|
||||
view.params.partial.offset = rounddown(page_offset, chunk_size);
|
||||
view.params.partial.size =
|
||||
min_t(unsigned int,
|
||||
chunk_size,
|
||||
(vma->vm_end - vma->vm_start)/PAGE_SIZE -
|
||||
view.params.partial.offset);
|
||||
}
|
||||
|
||||
/* Now pin it into the GTT if needed */
|
||||
ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
|
||||
if (ret)
|
||||
goto unlock;
|
||||
|
||||
@ -1681,30 +1697,50 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
goto unpin;
|
||||
|
||||
/* Finally, remap it using the new GTT offset */
|
||||
pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
|
||||
pfn = dev_priv->gtt.mappable_base +
|
||||
i915_gem_obj_ggtt_offset_view(obj, &view);
|
||||
pfn >>= PAGE_SHIFT;
|
||||
|
||||
if (!obj->fault_mappable) {
|
||||
unsigned long size = min_t(unsigned long,
|
||||
vma->vm_end - vma->vm_start,
|
||||
obj->base.size);
|
||||
int i;
|
||||
if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
|
||||
/* Overriding existing pages in partial view does not cause
|
||||
* us any trouble as TLBs are still valid because the fault
|
||||
* is due to userspace losing part of the mapping or never
|
||||
* having accessed it before (at this partials' range).
|
||||
*/
|
||||
unsigned long base = vma->vm_start +
|
||||
(view.params.partial.offset << PAGE_SHIFT);
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < size >> PAGE_SHIFT; i++) {
|
||||
ret = vm_insert_pfn(vma,
|
||||
(unsigned long)vma->vm_start + i * PAGE_SIZE,
|
||||
pfn + i);
|
||||
for (i = 0; i < view.params.partial.size; i++) {
|
||||
ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
obj->fault_mappable = true;
|
||||
} else
|
||||
ret = vm_insert_pfn(vma,
|
||||
(unsigned long)vmf->virtual_address,
|
||||
pfn + page_offset);
|
||||
} else {
|
||||
if (!obj->fault_mappable) {
|
||||
unsigned long size = min_t(unsigned long,
|
||||
vma->vm_end - vma->vm_start,
|
||||
obj->base.size);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < size >> PAGE_SHIFT; i++) {
|
||||
ret = vm_insert_pfn(vma,
|
||||
(unsigned long)vma->vm_start + i * PAGE_SIZE,
|
||||
pfn + i);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
obj->fault_mappable = true;
|
||||
} else
|
||||
ret = vm_insert_pfn(vma,
|
||||
(unsigned long)vmf->virtual_address,
|
||||
pfn + page_offset);
|
||||
}
|
||||
unpin:
|
||||
i915_gem_object_ggtt_unpin(obj);
|
||||
i915_gem_object_ggtt_unpin_view(obj, &view);
|
||||
unlock:
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
out:
|
||||
@ -1897,11 +1933,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (obj->base.size > dev_priv->gtt.mappable_end) {
|
||||
ret = -E2BIG;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (obj->madv != I915_MADV_WILLNEED) {
|
||||
DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
|
||||
ret = -EFAULT;
|
||||
@ -3069,6 +3100,7 @@ int i915_vma_unbind(struct i915_vma *vma)
|
||||
trace_i915_vma_unbind(vma);
|
||||
|
||||
vma->vm->unbind_vma(vma);
|
||||
vma->bound = 0;
|
||||
|
||||
list_del_init(&vma->mm_list);
|
||||
if (i915_is_ggtt(vma->vm)) {
|
||||
@ -3497,7 +3529,8 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
|
||||
}
|
||||
|
||||
/**
|
||||
* Finds free space in the GTT aperture and binds the object there.
|
||||
* Finds free space in the GTT aperture and binds the object or a view of it
|
||||
* there.
|
||||
*/
|
||||
static struct i915_vma *
|
||||
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
|
||||
@ -3516,36 +3549,60 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
|
||||
struct i915_vma *vma;
|
||||
int ret;
|
||||
|
||||
if(WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
|
||||
return ERR_PTR(-EINVAL);
|
||||
if (i915_is_ggtt(vm)) {
|
||||
u32 view_size;
|
||||
|
||||
fence_size = i915_gem_get_gtt_size(dev,
|
||||
obj->base.size,
|
||||
obj->tiling_mode);
|
||||
fence_alignment = i915_gem_get_gtt_alignment(dev,
|
||||
obj->base.size,
|
||||
obj->tiling_mode, true);
|
||||
unfenced_alignment =
|
||||
i915_gem_get_gtt_alignment(dev,
|
||||
obj->base.size,
|
||||
obj->tiling_mode, false);
|
||||
if (WARN_ON(!ggtt_view))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
view_size = i915_ggtt_view_size(obj, ggtt_view);
|
||||
|
||||
fence_size = i915_gem_get_gtt_size(dev,
|
||||
view_size,
|
||||
obj->tiling_mode);
|
||||
fence_alignment = i915_gem_get_gtt_alignment(dev,
|
||||
view_size,
|
||||
obj->tiling_mode,
|
||||
true);
|
||||
unfenced_alignment = i915_gem_get_gtt_alignment(dev,
|
||||
view_size,
|
||||
obj->tiling_mode,
|
||||
false);
|
||||
size = flags & PIN_MAPPABLE ? fence_size : view_size;
|
||||
} else {
|
||||
fence_size = i915_gem_get_gtt_size(dev,
|
||||
obj->base.size,
|
||||
obj->tiling_mode);
|
||||
fence_alignment = i915_gem_get_gtt_alignment(dev,
|
||||
obj->base.size,
|
||||
obj->tiling_mode,
|
||||
true);
|
||||
unfenced_alignment =
|
||||
i915_gem_get_gtt_alignment(dev,
|
||||
obj->base.size,
|
||||
obj->tiling_mode,
|
||||
false);
|
||||
size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
|
||||
}
|
||||
|
||||
if (alignment == 0)
|
||||
alignment = flags & PIN_MAPPABLE ? fence_alignment :
|
||||
unfenced_alignment;
|
||||
if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
|
||||
DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
|
||||
DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
|
||||
ggtt_view ? ggtt_view->type : 0,
|
||||
alignment);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
|
||||
|
||||
/* If the object is bigger than the entire aperture, reject it early
|
||||
* before evicting everything in a vain attempt to find space.
|
||||
/* If binding the object/GGTT view requires more space than the entire
|
||||
* aperture has, reject it early before evicting everything in a vain
|
||||
* attempt to find space.
|
||||
*/
|
||||
if (obj->base.size > end) {
|
||||
DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
|
||||
obj->base.size,
|
||||
if (size > end) {
|
||||
DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%lu\n",
|
||||
ggtt_view ? ggtt_view->type : 0,
|
||||
size,
|
||||
flags & PIN_MAPPABLE ? "mappable" : "total",
|
||||
end);
|
||||
return ERR_PTR(-E2BIG);
|
||||
@ -3841,17 +3898,10 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
|
||||
{
|
||||
struct drm_i915_gem_caching *args = data;
|
||||
struct drm_i915_gem_object *obj;
|
||||
int ret;
|
||||
|
||||
ret = i915_mutex_lock_interruptible(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
|
||||
if (&obj->base == NULL) {
|
||||
ret = -ENOENT;
|
||||
goto unlock;
|
||||
}
|
||||
if (&obj->base == NULL)
|
||||
return -ENOENT;
|
||||
|
||||
switch (obj->cache_level) {
|
||||
case I915_CACHE_LLC:
|
||||
@ -3868,10 +3918,8 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
|
||||
break;
|
||||
}
|
||||
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
unlock:
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return ret;
|
||||
drm_gem_object_unreference_unlocked(&obj->base);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
|
||||
@ -4207,7 +4255,8 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((bound ^ vma->bound) & GLOBAL_BIND) {
|
||||
if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
|
||||
(bound ^ vma->bound) & GLOBAL_BIND) {
|
||||
bool mappable, fenceable;
|
||||
u32 fence_size, fence_alignment;
|
||||
|
||||
@ -4226,9 +4275,9 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
|
||||
dev_priv->gtt.mappable_end);
|
||||
|
||||
obj->map_and_fenceable = mappable && fenceable;
|
||||
}
|
||||
|
||||
WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
|
||||
WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
|
||||
}
|
||||
|
||||
vma->pin_count++;
|
||||
return 0;
|
||||
@ -5226,13 +5275,10 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
|
||||
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct i915_vma *vma;
|
||||
list_for_each_entry(vma, &obj->vma_list, vma_link) {
|
||||
if (i915_is_ggtt(vma->vm) &&
|
||||
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
|
||||
continue;
|
||||
list_for_each_entry(vma, &obj->vma_list, vma_link)
|
||||
if (vma->pin_count > 0)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -1540,29 +1540,39 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
}
|
||||
|
||||
if (i915_needs_cmd_parser(ring) && args->batch_len) {
|
||||
batch_obj = i915_gem_execbuffer_parse(ring,
|
||||
struct drm_i915_gem_object *parsed_batch_obj;
|
||||
|
||||
parsed_batch_obj = i915_gem_execbuffer_parse(ring,
|
||||
&shadow_exec_entry,
|
||||
eb,
|
||||
batch_obj,
|
||||
args->batch_start_offset,
|
||||
args->batch_len,
|
||||
file->is_master);
|
||||
if (IS_ERR(batch_obj)) {
|
||||
ret = PTR_ERR(batch_obj);
|
||||
if (IS_ERR(parsed_batch_obj)) {
|
||||
ret = PTR_ERR(parsed_batch_obj);
|
||||
goto err;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the DISPATCH_SECURE bit to remove the NON_SECURE
|
||||
* bit from MI_BATCH_BUFFER_START commands issued in the
|
||||
* dispatch_execbuffer implementations. We specifically
|
||||
* don't want that set when the command parser is
|
||||
* enabled.
|
||||
* parsed_batch_obj == batch_obj means batch not fully parsed:
|
||||
* Accept, but don't promote to secure.
|
||||
*/
|
||||
if (USES_PPGTT(dev))
|
||||
dispatch_flags |= I915_DISPATCH_SECURE;
|
||||
|
||||
exec_start = 0;
|
||||
if (parsed_batch_obj != batch_obj) {
|
||||
/*
|
||||
* Batch parsed and accepted:
|
||||
*
|
||||
* Set the DISPATCH_SECURE bit to remove the NON_SECURE
|
||||
* bit from MI_BATCH_BUFFER_START commands issued in
|
||||
* the dispatch_execbuffer implementations. We
|
||||
* specifically don't want that set on batches the
|
||||
* command parser has accepted.
|
||||
*/
|
||||
dispatch_flags |= I915_DISPATCH_SECURE;
|
||||
exec_start = 0;
|
||||
batch_obj = parsed_batch_obj;
|
||||
}
|
||||
}
|
||||
|
||||
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
|
||||
|
@ -756,8 +756,8 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
|
||||
|
||||
WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES));
|
||||
|
||||
/* FIXME: PPGTT container_of won't work for 64b */
|
||||
WARN_ON((start + length) > 0x800000000ULL);
|
||||
/* FIXME: upper bound must not overflow 32 bits */
|
||||
WARN_ON((start + length) >= (1ULL << 32));
|
||||
|
||||
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
|
||||
if (pd)
|
||||
@ -844,15 +844,6 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
|
||||
uint32_t pdpe;
|
||||
int ret;
|
||||
|
||||
#ifndef CONFIG_64BIT
|
||||
/* Disallow 64b address on 32b platforms. Nothing is wrong with doing
|
||||
* this in hardware, but a lot of the drm code is not prepared to handle
|
||||
* 64b offset on 32b platforms.
|
||||
* This will be addressed when 48b PPGTT is added */
|
||||
if (start + length > 0x100000000ULL)
|
||||
return -E2BIG;
|
||||
#endif
|
||||
|
||||
/* Wrap is never okay since we can only represent 48b, and we don't
|
||||
* actually use the other side of the canonical address space.
|
||||
*/
|
||||
@ -1945,19 +1936,23 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
|
||||
struct drm_device *dev = vma->vm->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_gem_object *obj = vma->obj;
|
||||
const uint64_t size = min_t(uint64_t,
|
||||
obj->base.size,
|
||||
vma->node.size);
|
||||
|
||||
if (vma->bound & GLOBAL_BIND) {
|
||||
vma->vm->clear_range(vma->vm,
|
||||
vma->node.start,
|
||||
obj->base.size,
|
||||
size,
|
||||
true);
|
||||
}
|
||||
|
||||
if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
|
||||
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
|
||||
|
||||
appgtt->base.clear_range(&appgtt->base,
|
||||
vma->node.start,
|
||||
obj->base.size,
|
||||
size,
|
||||
true);
|
||||
}
|
||||
}
|
||||
@ -2758,6 +2753,47 @@ err_st_alloc:
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
static struct sg_table *
|
||||
intel_partial_pages(const struct i915_ggtt_view *view,
|
||||
struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct sg_table *st;
|
||||
struct scatterlist *sg;
|
||||
struct sg_page_iter obj_sg_iter;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
st = kmalloc(sizeof(*st), GFP_KERNEL);
|
||||
if (!st)
|
||||
goto err_st_alloc;
|
||||
|
||||
ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
|
||||
if (ret)
|
||||
goto err_sg_alloc;
|
||||
|
||||
sg = st->sgl;
|
||||
st->nents = 0;
|
||||
for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
|
||||
view->params.partial.offset)
|
||||
{
|
||||
if (st->nents >= view->params.partial.size)
|
||||
break;
|
||||
|
||||
sg_set_page(sg, NULL, PAGE_SIZE, 0);
|
||||
sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
|
||||
sg_dma_len(sg) = PAGE_SIZE;
|
||||
|
||||
sg = sg_next(sg);
|
||||
st->nents++;
|
||||
}
|
||||
|
||||
return st;
|
||||
|
||||
err_sg_alloc:
|
||||
kfree(st);
|
||||
err_st_alloc:
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
static int
|
||||
i915_get_ggtt_vma_pages(struct i915_vma *vma)
|
||||
{
|
||||
@ -2771,6 +2807,9 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
|
||||
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
|
||||
vma->ggtt_view.pages =
|
||||
intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
|
||||
else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
|
||||
vma->ggtt_view.pages =
|
||||
intel_partial_pages(&vma->ggtt_view, vma->obj);
|
||||
else
|
||||
WARN_ONCE(1, "GGTT view %u not implemented!\n",
|
||||
vma->ggtt_view.type);
|
||||
@ -2843,3 +2882,25 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_ggtt_view_size - Get the size of a GGTT view.
|
||||
* @obj: Object the view is of.
|
||||
* @view: The view in question.
|
||||
*
|
||||
* @return The size of the GGTT view in bytes.
|
||||
*/
|
||||
size_t
|
||||
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
|
||||
const struct i915_ggtt_view *view)
|
||||
{
|
||||
if (view->type == I915_GGTT_VIEW_NORMAL ||
|
||||
view->type == I915_GGTT_VIEW_ROTATED) {
|
||||
return obj->base.size;
|
||||
} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
|
||||
return view->params.partial.size << PAGE_SHIFT;
|
||||
} else {
|
||||
WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
|
||||
return obj->base.size;
|
||||
}
|
||||
}
|
||||
|
@@ -117,7 +117,8 @@ typedef uint64_t gen8_pde_t;

enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
I915_GGTT_VIEW_ROTATED
I915_GGTT_VIEW_ROTATED,
I915_GGTT_VIEW_PARTIAL,
};

struct intel_rotation_info {
@@ -130,6 +131,13 @@ struct intel_rotation_info {
struct i915_ggtt_view {
enum i915_ggtt_view_type type;

union {
struct {
unsigned long offset;
unsigned int size;
} partial;
} params;

struct sg_table *pages;

union {
@@ -495,7 +503,15 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
if (WARN_ON(!a || !b))
return false;

return a->type == b->type;
if (a->type != b->type)
return false;
if (a->type == I915_GGTT_VIEW_PARTIAL)
return !memcmp(&a->params, &b->params, sizeof(a->params));
return true;
}

size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view);

#endif
@@ -554,6 +554,7 @@ static void i915_error_state_free(struct kref *error_ref)

for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
i915_error_object_free(error->ring[i].batchbuffer);
i915_error_object_free(error->ring[i].wa_batchbuffer);
i915_error_object_free(error->ring[i].ringbuffer);
i915_error_object_free(error->ring[i].hws_page);
i915_error_object_free(error->ring[i].ctx);
@@ -53,6 +53,7 @@ struct i915_params i915 __read_mostly = {
.mmio_debug = 0,
.verbose_state_checks = 1,
.nuclear_pageflip = 0,
.edp_vswing = 0,
};

module_param_named(modeset, i915.modeset, int, 0400);
@@ -184,3 +185,10 @@ MODULE_PARM_DESC(verbose_state_checks,
module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
MODULE_PARM_DESC(nuclear_pageflip,
"Force atomic modeset functionality; only planes work for now (default: false).");

/* WA to get away with the default setting in VBT for early platforms.Will be removed */
module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
MODULE_PARM_DESC(edp_vswing,
"Ignore/Override vswing pre-emph table selection from VBT "
"(0=use value from vbt [default], 1=low power swing(200mV),"
"2=default swing(400mV))");
@ -670,6 +670,12 @@ enum skl_disp_power_wells {
|
||||
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
|
||||
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
|
||||
|
||||
#define VLV_TURBO_SOC_OVERRIDE 0x04
|
||||
#define VLV_OVERRIDE_EN 1
|
||||
#define VLV_SOC_TDP_EN (1 << 1)
|
||||
#define VLV_BIAS_CPU_125_SOC_875 (6 << 2)
|
||||
#define CHV_BIAS_CPU_50_SOC_50 (3 << 2)
|
||||
|
||||
#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
|
||||
|
||||
/* vlv2 north clock has */
|
||||
@ -955,6 +961,7 @@ enum skl_disp_power_wells {
|
||||
|
||||
#define _VLV_PCS_DW11_CH0 0x822c
|
||||
#define _VLV_PCS_DW11_CH1 0x842c
|
||||
#define DPIO_TX2_STAGGER_MASK(x) ((x)<<24)
|
||||
#define DPIO_LANEDESKEW_STRAP_OVRD (1<<3)
|
||||
#define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1)
|
||||
#define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
|
||||
@ -967,8 +974,20 @@ enum skl_disp_power_wells {
|
||||
#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
|
||||
#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
|
||||
|
||||
#define _VLV_PCS01_DW12_CH0 0x0230
|
||||
#define _VLV_PCS23_DW12_CH0 0x0430
|
||||
#define _VLV_PCS01_DW12_CH1 0x2630
|
||||
#define _VLV_PCS23_DW12_CH1 0x2830
|
||||
#define VLV_PCS01_DW12(ch) _PORT(ch, _VLV_PCS01_DW12_CH0, _VLV_PCS01_DW12_CH1)
|
||||
#define VLV_PCS23_DW12(ch) _PORT(ch, _VLV_PCS23_DW12_CH0, _VLV_PCS23_DW12_CH1)
|
||||
|
||||
#define _VLV_PCS_DW12_CH0 0x8230
|
||||
#define _VLV_PCS_DW12_CH1 0x8430
|
||||
#define DPIO_TX2_STAGGER_MULT(x) ((x)<<20)
|
||||
#define DPIO_TX1_STAGGER_MULT(x) ((x)<<16)
|
||||
#define DPIO_TX1_STAGGER_MASK(x) ((x)<<8)
|
||||
#define DPIO_LANESTAGGER_STRAP_OVRD (1<<6)
|
||||
#define DPIO_LANESTAGGER_STRAP(x) ((x)<<0)
|
||||
#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
|
||||
|
||||
#define _VLV_PCS_DW14_CH0 0x8238
|
||||
@ -2118,7 +2137,10 @@ enum skl_disp_power_wells {
|
||||
#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240)
|
||||
#define DPLL_PORTD_READY_MASK (0xf)
|
||||
#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
|
||||
#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
|
||||
#define PHY_CH_SU_PSR 0x1
|
||||
#define PHY_CH_DEEP_PSR 0x7
|
||||
#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2))
|
||||
#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
|
||||
#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
|
||||
#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
|
||||
|
||||
@ -3480,6 +3502,18 @@ enum skl_disp_power_wells {
|
||||
#define UTIL_PIN_CTL 0x48400
|
||||
#define UTIL_PIN_ENABLE (1 << 31)
|
||||
|
||||
/* BXT backlight register definition. */
|
||||
#define BXT_BLC_PWM_CTL1 0xC8250
|
||||
#define BXT_BLC_PWM_ENABLE (1 << 31)
|
||||
#define BXT_BLC_PWM_POLARITY (1 << 29)
|
||||
#define BXT_BLC_PWM_FREQ1 0xC8254
|
||||
#define BXT_BLC_PWM_DUTY1 0xC8258
|
||||
|
||||
#define BXT_BLC_PWM_CTL2 0xC8350
|
||||
#define BXT_BLC_PWM_FREQ2 0xC8354
|
||||
#define BXT_BLC_PWM_DUTY2 0xC8358
|
||||
|
||||
|
||||
#define PCH_GTC_CTL 0xe7000
|
||||
#define PCH_GTC_ENABLE (1 << 31)
|
||||
|
||||
@ -5700,7 +5734,7 @@ enum skl_disp_power_wells {
|
||||
#define HSW_NDE_RSTWRN_OPT 0x46408
|
||||
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
|
||||
|
||||
#define FF_SLICE_CS_CHICKEN2 0x02e4
|
||||
#define FF_SLICE_CS_CHICKEN2 0x20e4
|
||||
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
|
||||
|
||||
/* GEN7 chicken */
|
||||
@ -6638,15 +6672,20 @@ enum skl_disp_power_wells {
|
||||
|
||||
#define GEN6_PCODE_MAILBOX 0x138124
|
||||
#define GEN6_PCODE_READY (1<<31)
|
||||
#define GEN6_READ_OC_PARAMS 0xc
|
||||
#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
|
||||
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
|
||||
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
|
||||
#define GEN6_PCODE_READ_RC6VIDS 0x5
|
||||
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
|
||||
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
|
||||
#define GEN9_PCODE_READ_MEM_LATENCY 0x6
|
||||
#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
|
||||
#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
|
||||
#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
|
||||
#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
|
||||
#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
|
||||
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
|
||||
#define GEN6_READ_OC_PARAMS 0xc
|
||||
#define GEN6_PCODE_READ_D_COMP 0x10
|
||||
#define GEN6_PCODE_WRITE_D_COMP 0x11
|
||||
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
|
||||
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
|
||||
#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
|
||||
#define DISPLAY_IPS_CONTROL 0x19
|
||||
#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
|
||||
@ -6655,12 +6694,6 @@ enum skl_disp_power_wells {
|
||||
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
|
||||
#define GEN6_PCODE_DATA1 0x13812C
|
||||
|
||||
#define GEN9_PCODE_READ_MEM_LATENCY 0x6
|
||||
#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
|
||||
#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
|
||||
#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
|
||||
#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
|
||||
|
||||
#define GEN6_GT_CORE_STATUS 0x138060
|
||||
#define GEN6_CORE_CPD_STATE_MASK (7<<4)
|
||||
#define GEN6_RCn_MASK 7
|
||||
@ -6721,6 +6754,7 @@ enum skl_disp_power_wells {
|
||||
#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
|
||||
#define GEN7_MAX_PS_THREAD_DEP (8<<12)
|
||||
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
|
||||
#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1<<4)
|
||||
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
|
||||
|
||||
#define GEN9_HALF_SLICE_CHICKEN5 0xe188
|
||||
@ -7135,16 +7169,16 @@ enum skl_disp_power_wells {
|
||||
#define DPLL_CTRL1 0x6C058
|
||||
#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5))
|
||||
#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4))
|
||||
#define DPLL_CRTL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
|
||||
#define DPLL_CRTL1_LINK_RATE_SHIFT(id) ((id)*6+1)
|
||||
#define DPLL_CRTL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
|
||||
#define DPLL_CTRL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
|
||||
#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id)*6+1)
|
||||
#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
|
||||
#define DPLL_CTRL1_OVERRIDE(id) (1<<((id)*6))
|
||||
#define DPLL_CRTL1_LINK_RATE_2700 0
|
||||
#define DPLL_CRTL1_LINK_RATE_1350 1
|
||||
#define DPLL_CRTL1_LINK_RATE_810 2
|
||||
#define DPLL_CRTL1_LINK_RATE_1620 3
|
||||
#define DPLL_CRTL1_LINK_RATE_1080 4
|
||||
#define DPLL_CRTL1_LINK_RATE_2160 5
|
||||
#define DPLL_CTRL1_LINK_RATE_2700 0
|
||||
#define DPLL_CTRL1_LINK_RATE_1350 1
|
||||
#define DPLL_CTRL1_LINK_RATE_810 2
|
||||
#define DPLL_CTRL1_LINK_RATE_1620 3
|
||||
#define DPLL_CTRL1_LINK_RATE_1080 4
|
||||
#define DPLL_CTRL1_LINK_RATE_2160 5
|
||||
|
||||
/* DPLL control2 */
|
||||
#define DPLL_CTRL2 0x6C05C
|
||||
@ -7204,6 +7238,17 @@ enum skl_disp_power_wells {
|
||||
#define DC_STATE_EN_UPTO_DC5 (1<<0)
|
||||
#define DC_STATE_EN_DC9 (1<<3)
|
||||
|
||||
/*
|
||||
* SKL DC
|
||||
*/
|
||||
#define DC_STATE_EN 0x45504
|
||||
#define DC_STATE_EN_UPTO_DC5 (1<<0)
|
||||
#define DC_STATE_EN_UPTO_DC6 (2<<0)
|
||||
#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
|
||||
|
||||
#define DC_STATE_DEBUG 0x45520
|
||||
#define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1)
|
||||
|
||||
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
|
||||
* since on HSW we can't write to it using I915_WRITE. */
|
||||
#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
|
||||
|
@ -169,7 +169,7 @@ int intel_atomic_commit(struct drm_device *dev,
|
||||
plane->state->state = NULL;
|
||||
}
|
||||
|
||||
/* swap crtc_state */
|
||||
/* swap crtc_scaler_state */
|
||||
for (i = 0; i < dev->mode_config.num_crtc; i++) {
|
||||
struct drm_crtc *crtc = state->crtcs[i];
|
||||
if (!crtc) {
|
||||
@ -178,6 +178,9 @@ int intel_atomic_commit(struct drm_device *dev,
|
||||
|
||||
to_intel_crtc(crtc)->config->scaler_state =
|
||||
to_intel_crtc_state(state->crtc_states[i])->scaler_state;
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 9)
|
||||
skl_detach_scalers(to_intel_crtc(crtc));
|
||||
}
|
||||
|
||||
drm_atomic_helper_commit_planes(dev, state);
|
||||
@ -247,8 +250,12 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
|
||||
crtc_state = kmemdup(intel_crtc->config,
|
||||
sizeof(*intel_crtc->config), GFP_KERNEL);
|
||||
|
||||
if (crtc_state)
|
||||
crtc_state->base.crtc = crtc;
|
||||
if (!crtc_state)
|
||||
return NULL;
|
||||
|
||||
__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
|
||||
|
||||
crtc_state->base.crtc = crtc;
|
||||
|
||||
return &crtc_state->base;
|
||||
}
|
||||
|
@ -85,8 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
|
||||
return NULL;
|
||||
|
||||
state = &intel_state->base;
|
||||
if (state->fb)
|
||||
drm_framebuffer_reference(state->fb);
|
||||
|
||||
__drm_atomic_helper_plane_duplicate_state(plane, state);
|
||||
|
||||
return state;
|
||||
}
|
||||
@ -111,6 +111,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
|
||||
{
|
||||
struct drm_crtc *crtc = state->crtc;
|
||||
struct intel_crtc *intel_crtc;
|
||||
struct intel_crtc_state *crtc_state;
|
||||
struct intel_plane *intel_plane = to_intel_plane(plane);
|
||||
struct intel_plane_state *intel_state = to_intel_plane_state(state);
|
||||
|
||||
@ -126,6 +127,17 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
|
||||
if (!crtc)
|
||||
return 0;
|
||||
|
||||
/* FIXME: temporary hack necessary while we still use the plane update
|
||||
* helper. */
|
||||
if (state->state) {
|
||||
crtc_state =
|
||||
intel_atomic_get_crtc_state(state->state, intel_crtc);
|
||||
if (IS_ERR(crtc_state))
|
||||
return PTR_ERR(crtc_state);
|
||||
} else {
|
||||
crtc_state = intel_crtc->config;
|
||||
}
|
||||
|
||||
/*
|
||||
* The original src/dest coordinates are stored in state->base, but
|
||||
* we want to keep another copy internal to our driver that we can
|
||||
@ -144,9 +156,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
|
||||
intel_state->clip.x1 = 0;
|
||||
intel_state->clip.y1 = 0;
|
||||
intel_state->clip.x2 =
|
||||
intel_crtc->active ? intel_crtc->config->pipe_src_w : 0;
|
||||
crtc_state->base.active ? crtc_state->pipe_src_w : 0;
|
||||
intel_state->clip.y2 =
|
||||
intel_crtc->active ? intel_crtc->config->pipe_src_h : 0;
|
||||
crtc_state->base.active ? crtc_state->pipe_src_h : 0;
|
||||
|
||||
/*
|
||||
* Disabling a plane is always okay; we just need to update
|
||||
|
@@ -269,6 +269,9 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
 	DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
 		      port_name(port), pipe_name(pipe));
 
+	if (WARN_ON(port == PORT_A))
+		return;
+
 	if (HAS_PCH_IBX(dev_priv->dev)) {
 		aud_config = IBX_AUD_CFG(pipe);
 		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
@@ -290,12 +293,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
 		tmp |= AUD_CONFIG_N_VALUE_INDEX;
 	I915_WRITE(aud_config, tmp);
 
-	if (WARN_ON(!port)) {
-		eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
-			IBX_ELD_VALID(PORT_D);
-	} else {
-		eldv = IBX_ELD_VALID(port);
-	}
+	eldv = IBX_ELD_VALID(port);
 
 	/* Invalidate ELD */
 	tmp = I915_READ(aud_cntrl_st2);
@@ -325,6 +323,9 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
 	DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
 		      port_name(port), pipe_name(pipe), drm_eld_size(eld));
 
+	if (WARN_ON(port == PORT_A))
+		return;
+
 	/*
 	 * FIXME: We're supposed to wait for vblank here, but we have vblanks
 	 * disabled during the mode set. The proper fix would be to push the
@@ -349,12 +350,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
 		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
 	}
 
-	if (WARN_ON(!port)) {
-		eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
-			IBX_ELD_VALID(PORT_D);
-	} else {
-		eldv = IBX_ELD_VALID(port);
-	}
+	eldv = IBX_ELD_VALID(port);
 
 	/* Invalidate ELD */
 	tmp = I915_READ(aud_cntrl_st2);
@@ -672,8 +672,13 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 	if (bdb->version >= 173) {
 		uint8_t vswing;
 
-		vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
-		dev_priv->vbt.edp_low_vswing = vswing == 0;
+		/* Don't read from VBT if module parameter has valid value*/
+		if (i915.edp_vswing) {
+			dev_priv->edp_low_vswing = i915.edp_vswing == 1;
+		} else {
+			vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
+			dev_priv->edp_low_vswing = vswing == 0;
+		}
 	}
 }
 
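The hunk above gives a non-zero i915.edp_vswing module parameter precedence over the VBT-derived value. A tiny stand-alone sketch of that decision, assuming the documented parameter meaning (0 = use VBT, 1 = low swing, 2 = default swing); illustrative only, not part of this commit:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: returns true when the low eDP vswing table should be
 * used, given the module parameter and the VBT vswing nibble.
 */
static bool use_low_vswing(int edp_vswing_param, uint8_t vbt_vswing)
{
	if (edp_vswing_param)		/* module parameter overrides VBT */
		return edp_vswing_param == 1;

	return vbt_vswing == 0;		/* VBT: 0 means low vswing */
}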
414	drivers/gpu/drm/i915/intel_csr.c	Normal file
@@ -0,0 +1,414 @@
|
||||
/*
|
||||
* Copyright © 2014 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
#include <linux/firmware.h>
|
||||
#include "i915_drv.h"
|
||||
#include "i915_reg.h"
|
||||
|
||||
#define I915_CSR_SKL "i915/skl_dmc_ver4.bin"
|
||||
|
||||
MODULE_FIRMWARE(I915_CSR_SKL);
|
||||
|
||||
/*
|
||||
* SKL CSR registers for DC5 and DC6
|
||||
*/
|
||||
#define CSR_PROGRAM_BASE 0x80000
|
||||
#define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0
|
||||
#define CSR_HTP_ADDR_SKL 0x00500034
|
||||
#define CSR_SSP_BASE 0x8F074
|
||||
#define CSR_HTP_SKL 0x8F004
|
||||
#define CSR_LAST_WRITE 0x8F034
|
||||
#define CSR_LAST_WRITE_VALUE 0xc003b400
|
||||
/* MMIO address range for CSR program (0x80000 - 0x82FFF) */
|
||||
#define CSR_MAX_FW_SIZE 0x2FFF
|
||||
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
|
||||
#define CSR_MMIO_START_RANGE 0x80000
|
||||
#define CSR_MMIO_END_RANGE 0x8FFFF
|
||||
|
||||
struct intel_css_header {
|
||||
/* 0x09 for DMC */
|
||||
uint32_t module_type;
|
||||
|
||||
/* Includes the DMC specific header in dwords */
|
||||
uint32_t header_len;
|
||||
|
||||
/* always value would be 0x10000 */
|
||||
uint32_t header_ver;
|
||||
|
||||
/* Not used */
|
||||
uint32_t module_id;
|
||||
|
||||
/* Not used */
|
||||
uint32_t module_vendor;
|
||||
|
||||
/* in YYYYMMDD format */
|
||||
uint32_t date;
|
||||
|
||||
/* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
|
||||
uint32_t size;
|
||||
|
||||
/* Not used */
|
||||
uint32_t key_size;
|
||||
|
||||
/* Not used */
|
||||
uint32_t modulus_size;
|
||||
|
||||
/* Not used */
|
||||
uint32_t exponent_size;
|
||||
|
||||
/* Not used */
|
||||
uint32_t reserved1[12];
|
||||
|
||||
/* Major Minor */
|
||||
uint32_t version;
|
||||
|
||||
/* Not used */
|
||||
uint32_t reserved2[8];
|
||||
|
||||
/* Not used */
|
||||
uint32_t kernel_header_info;
|
||||
} __packed;
|
||||
|
||||
struct intel_fw_info {
|
||||
uint16_t reserved1;
|
||||
|
||||
/* Stepping (A, B, C, ..., *). * is a wildcard */
|
||||
char stepping;
|
||||
|
||||
/* Sub-stepping (0, 1, ..., *). * is a wildcard */
|
||||
char substepping;
|
||||
|
||||
uint32_t offset;
|
||||
uint32_t reserved2;
|
||||
} __packed;
|
||||
|
||||
struct intel_package_header {
|
||||
/* DMC container header length in dwords */
|
||||
unsigned char header_len;
|
||||
|
||||
/* always value would be 0x01 */
|
||||
unsigned char header_ver;
|
||||
|
||||
unsigned char reserved[10];
|
||||
|
||||
/* Number of valid entries in the FWInfo array below */
|
||||
uint32_t num_entries;
|
||||
|
||||
struct intel_fw_info fw_info[20];
|
||||
} __packed;
|
||||
|
||||
struct intel_dmc_header {
|
||||
/* always value would be 0x40403E3E */
|
||||
uint32_t signature;
|
||||
|
||||
/* DMC binary header length */
|
||||
unsigned char header_len;
|
||||
|
||||
/* 0x01 */
|
||||
unsigned char header_ver;
|
||||
|
||||
/* Reserved */
|
||||
uint16_t dmcc_ver;
|
||||
|
||||
/* Major, Minor */
|
||||
uint32_t project;
|
||||
|
||||
/* Firmware program size (excluding header) in dwords */
|
||||
uint32_t fw_size;
|
||||
|
||||
/* Major Minor version */
|
||||
uint32_t fw_version;
|
||||
|
||||
/* Number of valid MMIO cycles present. */
|
||||
uint32_t mmio_count;
|
||||
|
||||
/* MMIO address */
|
||||
uint32_t mmioaddr[8];
|
||||
|
||||
/* MMIO data */
|
||||
uint32_t mmiodata[8];
|
||||
|
||||
/* FW filename */
|
||||
unsigned char dfile[32];
|
||||
|
||||
uint32_t reserved1[2];
|
||||
} __packed;
|
||||
|
||||
struct stepping_info {
|
||||
char stepping;
|
||||
char substepping;
|
||||
};
|
||||
|
||||
static const struct stepping_info skl_stepping_info[] = {
|
||||
{'A', '0'}, {'B', '0'}, {'C', '0'},
|
||||
{'D', '0'}, {'E', '0'}, {'F', '0'},
|
||||
{'G', '0'}, {'H', '0'}, {'I', '0'}
|
||||
};
|
||||
|
||||
static char intel_get_stepping(struct drm_device *dev)
|
||||
{
|
||||
if (IS_SKYLAKE(dev) && (dev->pdev->revision <
|
||||
ARRAY_SIZE(skl_stepping_info)))
|
||||
return skl_stepping_info[dev->pdev->revision].stepping;
|
||||
else
|
||||
return -ENODATA;
|
||||
}
|
||||
|
||||
static char intel_get_substepping(struct drm_device *dev)
|
||||
{
|
||||
if (IS_SKYLAKE(dev) && (dev->pdev->revision <
|
||||
ARRAY_SIZE(skl_stepping_info)))
|
||||
return skl_stepping_info[dev->pdev->revision].substepping;
|
||||
else
|
||||
return -ENODATA;
|
||||
}
|
||||
|
||||
enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
enum csr_state state;
|
||||
|
||||
mutex_lock(&dev_priv->csr_lock);
|
||||
state = dev_priv->csr.state;
|
||||
mutex_unlock(&dev_priv->csr_lock);
|
||||
|
||||
return state;
|
||||
}
|
||||
|
||||
void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
|
||||
enum csr_state state)
|
||||
{
|
||||
mutex_lock(&dev_priv->csr_lock);
|
||||
dev_priv->csr.state = state;
|
||||
mutex_unlock(&dev_priv->csr_lock);
|
||||
}
|
||||
|
||||
void intel_csr_load_program(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
__be32 *payload = dev_priv->csr.dmc_payload;
|
||||
uint32_t i, fw_size;
|
||||
|
||||
if (!IS_GEN9(dev)) {
|
||||
DRM_ERROR("No CSR support available for this platform\n");
|
||||
return;
|
||||
}
|
||||
|
||||
mutex_lock(&dev_priv->csr_lock);
|
||||
fw_size = dev_priv->csr.dmc_fw_size;
|
||||
for (i = 0; i < fw_size; i++)
|
||||
I915_WRITE(CSR_PROGRAM_BASE + i * 4,
|
||||
(u32 __force)payload[i]);
|
||||
|
||||
for (i = 0; i < dev_priv->csr.mmio_count; i++) {
|
||||
I915_WRITE(dev_priv->csr.mmioaddr[i],
|
||||
dev_priv->csr.mmiodata[i]);
|
||||
}
|
||||
|
||||
dev_priv->csr.state = FW_LOADED;
|
||||
mutex_unlock(&dev_priv->csr_lock);
|
||||
}
|
||||
|
||||
static void finish_csr_load(const struct firmware *fw, void *context)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = context;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct intel_css_header *css_header;
|
||||
struct intel_package_header *package_header;
|
||||
struct intel_dmc_header *dmc_header;
|
||||
struct intel_csr *csr = &dev_priv->csr;
|
||||
char stepping = intel_get_stepping(dev);
|
||||
char substepping = intel_get_substepping(dev);
|
||||
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
|
||||
uint32_t i;
|
||||
__be32 *dmc_payload;
|
||||
bool fw_loaded = false;
|
||||
|
||||
if (!fw) {
|
||||
i915_firmware_load_error_print(csr->fw_path, 0);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if ((stepping == -ENODATA) || (substepping == -ENODATA)) {
|
||||
DRM_ERROR("Unknown stepping info, firmware loading failed\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Extract CSS Header information*/
|
||||
css_header = (struct intel_css_header *)fw->data;
|
||||
if (sizeof(struct intel_css_header) !=
|
||||
(css_header->header_len * 4)) {
|
||||
DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
|
||||
(css_header->header_len * 4));
|
||||
goto out;
|
||||
}
|
||||
readcount += sizeof(struct intel_css_header);
|
||||
|
||||
/* Extract Package Header information*/
|
||||
package_header = (struct intel_package_header *)
|
||||
&fw->data[readcount];
|
||||
if (sizeof(struct intel_package_header) !=
|
||||
(package_header->header_len * 4)) {
|
||||
DRM_ERROR("Firmware has wrong package header length %u bytes\n",
|
||||
(package_header->header_len * 4));
|
||||
goto out;
|
||||
}
|
||||
readcount += sizeof(struct intel_package_header);
|
||||
|
||||
/* Search for dmc_offset to find firware binary. */
|
||||
for (i = 0; i < package_header->num_entries; i++) {
|
||||
if (package_header->fw_info[i].substepping == '*' &&
|
||||
stepping == package_header->fw_info[i].stepping) {
|
||||
dmc_offset = package_header->fw_info[i].offset;
|
||||
break;
|
||||
} else if (stepping == package_header->fw_info[i].stepping &&
|
||||
substepping == package_header->fw_info[i].substepping) {
|
||||
dmc_offset = package_header->fw_info[i].offset;
|
||||
break;
|
||||
} else if (package_header->fw_info[i].stepping == '*' &&
|
||||
package_header->fw_info[i].substepping == '*')
|
||||
dmc_offset = package_header->fw_info[i].offset;
|
||||
}
|
||||
if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
|
||||
DRM_ERROR("Firmware not supported for %c stepping\n", stepping);
|
||||
goto out;
|
||||
}
|
||||
readcount += dmc_offset;
|
||||
|
||||
/* Extract dmc_header information. */
|
||||
dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
|
||||
if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
|
||||
DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
|
||||
(dmc_header->header_len));
|
||||
goto out;
|
||||
}
|
||||
readcount += sizeof(struct intel_dmc_header);
|
||||
|
||||
/* Cache the dmc header info. */
|
||||
if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
|
||||
DRM_ERROR("Firmware has wrong mmio count %u\n",
|
||||
dmc_header->mmio_count);
|
||||
goto out;
|
||||
}
|
||||
csr->mmio_count = dmc_header->mmio_count;
|
||||
for (i = 0; i < dmc_header->mmio_count; i++) {
|
||||
if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE &&
|
||||
dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
|
||||
DRM_ERROR(" Firmware has wrong mmio address 0x%x\n",
|
||||
dmc_header->mmioaddr[i]);
|
||||
goto out;
|
||||
}
|
||||
csr->mmioaddr[i] = dmc_header->mmioaddr[i];
|
||||
csr->mmiodata[i] = dmc_header->mmiodata[i];
|
||||
}
|
||||
|
||||
/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
|
||||
nbytes = dmc_header->fw_size * 4;
|
||||
if (nbytes > CSR_MAX_FW_SIZE) {
|
||||
DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
|
||||
goto out;
|
||||
}
|
||||
csr->dmc_fw_size = dmc_header->fw_size;
|
||||
|
||||
csr->dmc_payload = kmalloc(nbytes, GFP_KERNEL);
|
||||
if (!csr->dmc_payload) {
|
||||
DRM_ERROR("Memory allocation failed for dmc payload\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
dmc_payload = csr->dmc_payload;
|
||||
for (i = 0; i < dmc_header->fw_size; i++) {
|
||||
uint32_t *tmp = (u32 *)&fw->data[readcount + i * 4];
|
||||
/*
|
||||
* The firmware payload is an array of 32 bit words stored in
|
||||
* little-endian format in the firmware image and programmed
|
||||
* as 32 bit big-endian format to memory.
|
||||
*/
|
||||
dmc_payload[i] = cpu_to_be32(*tmp);
|
||||
}
|
||||
|
||||
/* load csr program during system boot, as needed for DC states */
|
||||
intel_csr_load_program(dev);
|
||||
fw_loaded = true;
|
||||
|
||||
out:
|
||||
if (fw_loaded)
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
else
|
||||
intel_csr_load_status_set(dev_priv, FW_FAILED);
|
||||
|
||||
release_firmware(fw);
|
||||
}
|
||||
|
||||
void intel_csr_ucode_init(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_csr *csr = &dev_priv->csr;
|
||||
int ret;
|
||||
|
||||
if (!HAS_CSR(dev))
|
||||
return;
|
||||
|
||||
if (IS_SKYLAKE(dev))
|
||||
csr->fw_path = I915_CSR_SKL;
|
||||
else {
|
||||
DRM_ERROR("Unexpected: no known CSR firmware for platform\n");
|
||||
intel_csr_load_status_set(dev_priv, FW_FAILED);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Obtain a runtime pm reference, until CSR is loaded,
|
||||
* to avoid entering runtime-suspend.
|
||||
*/
|
||||
intel_runtime_pm_get(dev_priv);
|
||||
|
||||
/* CSR supported for platform, load firmware */
|
||||
ret = request_firmware_nowait(THIS_MODULE, true, csr->fw_path,
|
||||
&dev_priv->dev->pdev->dev,
|
||||
GFP_KERNEL, dev_priv,
|
||||
finish_csr_load);
|
||||
if (ret) {
|
||||
i915_firmware_load_error_print(csr->fw_path, ret);
|
||||
intel_csr_load_status_set(dev_priv, FW_FAILED);
|
||||
}
|
||||
}
|
||||
|
||||
void intel_csr_ucode_fini(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (!HAS_CSR(dev))
|
||||
return;
|
||||
|
||||
intel_csr_load_status_set(dev_priv, FW_FAILED);
|
||||
kfree(dev_priv->csr.dmc_payload);
|
||||
}
|
||||
|
||||
void assert_csr_loaded(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
WARN((intel_csr_load_status_get(dev_priv) != FW_LOADED), "CSR is not loaded.\n");
|
||||
WARN(!I915_READ(CSR_PROGRAM_BASE),
|
||||
"CSR program storage start is NULL\n");
|
||||
WARN(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
|
||||
WARN(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
|
||||
}
|
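The payload copy in finish_csr_load() above converts each little-endian dword of the firmware image to big-endian before intel_csr_load_program() writes it to CSR_PROGRAM_BASE. A minimal stand-alone sketch of that byte-order conversion, assuming a little-endian host where cpu_to_be32() reduces to a byte swap (illustrative only, not part of this commit):

#include <stdint.h>
#include <string.h>

/* Illustrative only: mirrors the dmc_payload copy loop in finish_csr_load().
 * fw_data points at little-endian dwords in the firmware image; out receives
 * the big-endian values that are later written out one dword at a time.
 */
static void csr_swap_payload(uint32_t *out, const uint8_t *fw_data,
			     uint32_t fw_size_dwords)
{
	uint32_t i, le;

	for (i = 0; i < fw_size_dwords; i++) {
		memcpy(&le, fw_data + i * 4, sizeof(le)); /* unaligned-safe read */
		out[i] = ((le & 0x000000ffu) << 24) |
			 ((le & 0x0000ff00u) << 8) |
			 ((le & 0x00ff0000u) >> 8) |
			 ((le & 0xff000000u) >> 24);
	}
}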
@@ -282,7 +282,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
 		ddi_translations_fdi = NULL;
 		ddi_translations_dp = skl_ddi_translations_dp;
 		n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
-		if (dev_priv->vbt.edp_low_vswing) {
+		if (dev_priv->edp_low_vswing) {
 			ddi_translations_edp = skl_ddi_translations_edp;
 			n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp);
 		} else {
@@ -584,17 +584,18 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct intel_encoder *ret = NULL;
 	struct drm_atomic_state *state;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
 	int num_encoders = 0;
 	int i;
 
 	state = crtc_state->base.state;
 
-	for (i = 0; i < state->num_connector; i++) {
-		if (!state->connectors[i] ||
-		    state->connector_states[i]->crtc != crtc_state->base.crtc)
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc != crtc_state->base.crtc)
 			continue;
 
-		ret = to_intel_encoder(state->connector_states[i]->best_encoder);
+		ret = to_intel_encoder(connector_state->best_encoder);
 		num_encoders++;
 	}
 
@@ -870,26 +871,26 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
 	if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) {
 		link_clock = skl_calc_wrpll_link(dev_priv, dpll);
 	} else {
-		link_clock = dpll_ctl1 & DPLL_CRTL1_LINK_RATE_MASK(dpll);
-		link_clock >>= DPLL_CRTL1_LINK_RATE_SHIFT(dpll);
+		link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(dpll);
+		link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(dpll);
 
 		switch (link_clock) {
-		case DPLL_CRTL1_LINK_RATE_810:
+		case DPLL_CTRL1_LINK_RATE_810:
 			link_clock = 81000;
 			break;
-		case DPLL_CRTL1_LINK_RATE_1080:
+		case DPLL_CTRL1_LINK_RATE_1080:
 			link_clock = 108000;
 			break;
-		case DPLL_CRTL1_LINK_RATE_1350:
+		case DPLL_CTRL1_LINK_RATE_1350:
 			link_clock = 135000;
 			break;
-		case DPLL_CRTL1_LINK_RATE_1620:
+		case DPLL_CTRL1_LINK_RATE_1620:
 			link_clock = 162000;
 			break;
-		case DPLL_CRTL1_LINK_RATE_2160:
+		case DPLL_CTRL1_LINK_RATE_2160:
 			link_clock = 216000;
 			break;
-		case DPLL_CRTL1_LINK_RATE_2700:
+		case DPLL_CTRL1_LINK_RATE_2700:
 			link_clock = 270000;
 			break;
 		default:
@ -1188,69 +1189,69 @@ found:
|
||||
if (min_dco_index > 2) {
|
||||
WARN(1, "No valid values found for the given pixel clock\n");
|
||||
} else {
|
||||
wrpll_params->central_freq = dco_central_freq[min_dco_index];
|
||||
wrpll_params->central_freq = dco_central_freq[min_dco_index];
|
||||
|
||||
switch (dco_central_freq[min_dco_index]) {
|
||||
case 9600000000ULL:
|
||||
switch (dco_central_freq[min_dco_index]) {
|
||||
case 9600000000ULL:
|
||||
wrpll_params->central_freq = 0;
|
||||
break;
|
||||
case 9000000000ULL:
|
||||
case 9000000000ULL:
|
||||
wrpll_params->central_freq = 1;
|
||||
break;
|
||||
case 8400000000ULL:
|
||||
case 8400000000ULL:
|
||||
wrpll_params->central_freq = 3;
|
||||
}
|
||||
}
|
||||
|
||||
switch (candidate_p0[min_dco_index]) {
|
||||
case 1:
|
||||
switch (candidate_p0[min_dco_index]) {
|
||||
case 1:
|
||||
wrpll_params->pdiv = 0;
|
||||
break;
|
||||
case 2:
|
||||
case 2:
|
||||
wrpll_params->pdiv = 1;
|
||||
break;
|
||||
case 3:
|
||||
case 3:
|
||||
wrpll_params->pdiv = 2;
|
||||
break;
|
||||
case 7:
|
||||
case 7:
|
||||
wrpll_params->pdiv = 4;
|
||||
break;
|
||||
default:
|
||||
default:
|
||||
WARN(1, "Incorrect PDiv\n");
|
||||
}
|
||||
}
|
||||
|
||||
switch (candidate_p2[min_dco_index]) {
|
||||
case 5:
|
||||
switch (candidate_p2[min_dco_index]) {
|
||||
case 5:
|
||||
wrpll_params->kdiv = 0;
|
||||
break;
|
||||
case 2:
|
||||
case 2:
|
||||
wrpll_params->kdiv = 1;
|
||||
break;
|
||||
case 3:
|
||||
case 3:
|
||||
wrpll_params->kdiv = 2;
|
||||
break;
|
||||
case 1:
|
||||
case 1:
|
||||
wrpll_params->kdiv = 3;
|
||||
break;
|
||||
default:
|
||||
default:
|
||||
WARN(1, "Incorrect KDiv\n");
|
||||
}
|
||||
}
|
||||
|
||||
wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
|
||||
wrpll_params->qdiv_mode =
|
||||
wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
|
||||
wrpll_params->qdiv_mode =
|
||||
(wrpll_params->qdiv_ratio == 1) ? 0 : 1;
|
||||
|
||||
dco_freq = candidate_p0[min_dco_index] *
|
||||
candidate_p1[min_dco_index] *
|
||||
candidate_p2[min_dco_index] * afe_clock;
|
||||
dco_freq = candidate_p0[min_dco_index] *
|
||||
candidate_p1[min_dco_index] *
|
||||
candidate_p2[min_dco_index] * afe_clock;
|
||||
|
||||
/*
|
||||
* Intermediate values are in Hz.
|
||||
* Divide by MHz to match bsepc
|
||||
*/
|
||||
wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
|
||||
wrpll_params->dco_fraction =
|
||||
div_u64(((div_u64(dco_freq, 24) -
|
||||
wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
|
||||
* Intermediate values are in Hz.
|
||||
* Divide by MHz to match bsepc
|
||||
*/
|
||||
wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
|
||||
wrpll_params->dco_fraction =
|
||||
div_u64(((div_u64(dco_freq, 24) -
|
||||
wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
|
||||
|
||||
}
|
||||
}
|
||||
@@ -1294,13 +1295,13 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
 
 		switch (intel_dp->link_bw) {
 		case DP_LINK_BW_1_62:
-			ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, 0);
+			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
 			break;
 		case DP_LINK_BW_2_7:
-			ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, 0);
+			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
 			break;
 		case DP_LINK_BW_5_4:
-			ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, 0);
+			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
 			break;
 		}
 
@@ -1854,7 +1855,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 
 		val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
 			 DPLL_CTRL1_SSC(dpll) |
-			 DPLL_CRTL1_LINK_RATE_MASK(dpll));
+			 DPLL_CTRL1_LINK_RATE_MASK(dpll));
 		val |= crtc->config->dpll_hw_state.ctrl1 << (dpll * 6);
 
 		I915_WRITE(DPLL_CTRL1, val);
@@ -2100,7 +2101,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	val = I915_READ(DPLL_CTRL1);
 
 	val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) |
-		 DPLL_CRTL1_LINK_RATE_MASK(dpll));
+		 DPLL_CTRL1_LINK_RATE_MASK(dpll));
 	val |= pll->config.hw_state.ctrl1 << (dpll * 6);
 
 	I915_WRITE(DPLL_CTRL1, val);
File diff suppressed because it is too large
@@ -41,6 +41,12 @@
 
 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
 
+/* Compliance test status bits */
+#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
+#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
+#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
+#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
+
 struct dp_link_dpll {
 	int link_bw;
 	struct dpll dpll;
@@ -84,8 +90,8 @@ static const struct dp_link_dpll chv_dpll[] = {
 	{ DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
 		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
 };
-/* Skylake supports following rates */
-static const int gen9_rates[] = { 162000, 216000, 270000,
+
+static const int skl_rates[] = { 162000, 216000, 270000,
 				  324000, 432000, 540000 };
 static const int chv_rates[] = { 162000, 202500, 210000, 216000,
 				 243000, 270000, 324000, 405000,
@ -1098,30 +1104,30 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
|
||||
ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
|
||||
switch (link_clock / 2) {
|
||||
case 81000:
|
||||
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
|
||||
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
|
||||
SKL_DPLL0);
|
||||
break;
|
||||
case 135000:
|
||||
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
|
||||
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
|
||||
SKL_DPLL0);
|
||||
break;
|
||||
case 270000:
|
||||
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
|
||||
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
|
||||
SKL_DPLL0);
|
||||
break;
|
||||
case 162000:
|
||||
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
|
||||
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
|
||||
SKL_DPLL0);
|
||||
break;
|
||||
/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
|
||||
results in CDCLK change. Need to handle the change of CDCLK by
|
||||
disabling pipes and re-enabling them */
|
||||
case 108000:
|
||||
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
|
||||
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
|
||||
SKL_DPLL0);
|
||||
break;
|
||||
case 216000:
|
||||
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
|
||||
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
|
||||
SKL_DPLL0);
|
||||
break;
|
||||
|
||||
@@ -1161,9 +1167,9 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
 static int
 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
 {
-	if (INTEL_INFO(dev)->gen >= 9) {
-		*source_rates = gen9_rates;
-		return ARRAY_SIZE(gen9_rates);
+	if (IS_SKYLAKE(dev)) {
+		*source_rates = skl_rates;
+		return ARRAY_SIZE(skl_rates);
 	} else if (IS_CHERRYVIEW(dev)) {
 		*source_rates = chv_rates;
 		return ARRAY_SIZE(chv_rates);
@ -2491,6 +2497,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
|
||||
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
|
||||
unsigned int lane_mask = 0x0;
|
||||
|
||||
if (WARN_ON(dp_reg & DP_PORT_EN))
|
||||
return;
|
||||
@ -2509,7 +2516,8 @@ static void intel_enable_dp(struct intel_encoder *encoder)
|
||||
pps_unlock(intel_dp);
|
||||
|
||||
if (IS_VALLEYVIEW(dev))
|
||||
vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
|
||||
vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
|
||||
lane_mask);
|
||||
|
||||
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
|
||||
intel_dp_start_link_train(intel_dp);
|
||||
@ -2726,7 +2734,7 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
|
||||
to_intel_crtc(encoder->base.crtc);
|
||||
enum dpio_channel ch = vlv_dport_to_channel(dport);
|
||||
int pipe = intel_crtc->pipe;
|
||||
int data, i;
|
||||
int data, i, stagger;
|
||||
u32 val;
|
||||
|
||||
mutex_lock(&dev_priv->dpio_lock);
|
||||
@ -2766,7 +2774,38 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
|
||||
}
|
||||
|
||||
/* Data lane stagger programming */
|
||||
/* FIXME: Fix up value only after power analysis */
|
||||
if (intel_crtc->config->port_clock > 270000)
|
||||
stagger = 0x18;
|
||||
else if (intel_crtc->config->port_clock > 135000)
|
||||
stagger = 0xd;
|
||||
else if (intel_crtc->config->port_clock > 67500)
|
||||
stagger = 0x7;
|
||||
else if (intel_crtc->config->port_clock > 33750)
|
||||
stagger = 0x4;
|
||||
else
|
||||
stagger = 0x2;
|
||||
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
|
||||
val |= DPIO_TX2_STAGGER_MASK(0x1f);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
|
||||
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
|
||||
val |= DPIO_TX2_STAGGER_MASK(0x1f);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
|
||||
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
|
||||
DPIO_LANESTAGGER_STRAP(stagger) |
|
||||
DPIO_LANESTAGGER_STRAP_OVRD |
|
||||
DPIO_TX1_STAGGER_MASK(0x1f) |
|
||||
DPIO_TX1_STAGGER_MULT(6) |
|
||||
DPIO_TX2_STAGGER_MULT(0));
|
||||
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
|
||||
DPIO_LANESTAGGER_STRAP(stagger) |
|
||||
DPIO_LANESTAGGER_STRAP_OVRD |
|
||||
DPIO_TX1_STAGGER_MASK(0x1f) |
|
||||
DPIO_TX1_STAGGER_MULT(7) |
|
||||
DPIO_TX2_STAGGER_MULT(5));
|
||||
|
||||
mutex_unlock(&dev_priv->dpio_lock);
|
||||
|
||||
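The data lane stagger value programmed above scales down with the port clock. A small stand-alone sketch of that selection, mirroring the thresholds shown in the hunk (illustrative only, not part of this commit):

/* Illustrative only: port_clock -> data lane stagger mapping used in
 * chv_pre_enable_dp() above (port_clock is in kHz).
 */
static int chv_data_lane_stagger(int port_clock)
{
	if (port_clock > 270000)
		return 0x18;
	else if (port_clock > 135000)
		return 0xd;
	else if (port_clock > 67500)
		return 0x7;
	else if (port_clock > 33750)
		return 0x4;
	else
		return 0x2;
}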
@ -2894,7 +2933,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
|
||||
if (IS_BROXTON(dev))
|
||||
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
|
||||
else if (INTEL_INFO(dev)->gen >= 9) {
|
||||
if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
|
||||
if (dev_priv->edp_low_vswing && port == PORT_A)
|
||||
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
|
||||
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
|
||||
} else if (IS_VALLEYVIEW(dev))
|
||||
@ -3547,7 +3586,8 @@ static bool
|
||||
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
|
||||
uint8_t dp_train_pat)
|
||||
{
|
||||
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
|
||||
if (!intel_dp->train_set_valid)
|
||||
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
|
||||
intel_dp_set_signal_levels(intel_dp, DP);
|
||||
return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
|
||||
}
|
||||
@ -3660,6 +3700,23 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* if we used previously trained voltage and pre-emphasis values
|
||||
* and we don't get clock recovery, reset link training values
|
||||
*/
|
||||
if (intel_dp->train_set_valid) {
|
||||
DRM_DEBUG_KMS("clock recovery not ok, reset");
|
||||
/* clear the flag as we are not reusing train set */
|
||||
intel_dp->train_set_valid = false;
|
||||
if (!intel_dp_reset_link_train(intel_dp, &DP,
|
||||
DP_TRAINING_PATTERN_1 |
|
||||
DP_LINK_SCRAMBLING_DISABLE)) {
|
||||
DRM_ERROR("failed to enable link training\n");
|
||||
return;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Check to see if we've tried the max voltage */
|
||||
for (i = 0; i < intel_dp->lane_count; i++)
|
||||
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
|
||||
@ -3737,6 +3794,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
|
||||
|
||||
/* Make sure clock is still ok */
|
||||
if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
|
||||
intel_dp->train_set_valid = false;
|
||||
intel_dp_start_link_train(intel_dp);
|
||||
intel_dp_set_link_train(intel_dp, &DP,
|
||||
training_pattern |
|
||||
@ -3752,6 +3810,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
|
||||
|
||||
/* Try 5 times, then try clock recovery if that fails */
|
||||
if (tries > 5) {
|
||||
intel_dp->train_set_valid = false;
|
||||
intel_dp_start_link_train(intel_dp);
|
||||
intel_dp_set_link_train(intel_dp, &DP,
|
||||
training_pattern |
|
||||
@ -3773,9 +3832,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
|
||||
|
||||
intel_dp->DP = DP;
|
||||
|
||||
if (channel_eq)
|
||||
if (channel_eq) {
|
||||
intel_dp->train_set_valid = true;
|
||||
DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
|
||||
@ -4058,6 +4118,39 @@ static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
|
||||
static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
|
||||
{
|
||||
uint8_t test_result = DP_TEST_NAK;
|
||||
struct intel_connector *intel_connector = intel_dp->attached_connector;
|
||||
struct drm_connector *connector = &intel_connector->base;
|
||||
|
||||
if (intel_connector->detect_edid == NULL ||
|
||||
connector->edid_corrupt ||
|
||||
intel_dp->aux.i2c_defer_count > 6) {
|
||||
/* Check EDID read for NACKs, DEFERs and corruption
|
||||
* (DP CTS 1.2 Core r1.1)
|
||||
* 4.2.2.4 : Failed EDID read, I2C_NAK
|
||||
* 4.2.2.5 : Failed EDID read, I2C_DEFER
|
||||
* 4.2.2.6 : EDID corruption detected
|
||||
* Use failsafe mode for all cases
|
||||
*/
|
||||
if (intel_dp->aux.i2c_nack_count > 0 ||
|
||||
intel_dp->aux.i2c_defer_count > 0)
|
||||
DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
|
||||
intel_dp->aux.i2c_nack_count,
|
||||
intel_dp->aux.i2c_defer_count);
|
||||
intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
|
||||
} else {
|
||||
if (!drm_dp_dpcd_write(&intel_dp->aux,
|
||||
DP_TEST_EDID_CHECKSUM,
|
||||
&intel_connector->detect_edid->checksum,
|
||||
1));
|
||||
DRM_DEBUG_KMS("Failed to write EDID checksum\n");
|
||||
|
||||
test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
|
||||
intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
|
||||
}
|
||||
|
||||
/* Set test active flag here so userspace doesn't interrupt things */
|
||||
intel_dp->compliance_test_active = 1;
|
||||
|
||||
return test_result;
|
||||
}
|
||||
|
||||
@ -4073,7 +4166,10 @@ static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
|
||||
uint8_t rxdata = 0;
|
||||
int status = 0;
|
||||
|
||||
intel_dp->compliance_test_active = 0;
|
||||
intel_dp->compliance_test_type = 0;
|
||||
intel_dp->compliance_test_data = 0;
|
||||
|
||||
intel_dp->aux.i2c_nack_count = 0;
|
||||
intel_dp->aux.i2c_defer_count = 0;
|
||||
|
||||
@ -4220,7 +4316,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
|
||||
sink_irq_vector);
|
||||
|
||||
if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
|
||||
intel_dp_handle_test_request(intel_dp);
|
||||
DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
|
||||
if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
|
||||
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
|
||||
}
|
||||
@ -4450,6 +4546,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
|
||||
enum drm_connector_status status;
|
||||
enum intel_display_power_domain power_domain;
|
||||
bool ret;
|
||||
u8 sink_irq_vector;
|
||||
|
||||
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
|
||||
connector->base.id, connector->name);
|
||||
@ -4492,6 +4589,20 @@ intel_dp_detect(struct drm_connector *connector, bool force)
|
||||
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
|
||||
status = connector_status_connected;
|
||||
|
||||
/* Try to read the source of the interrupt */
|
||||
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
|
||||
intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
|
||||
/* Clear interrupt source */
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux,
|
||||
DP_DEVICE_SERVICE_IRQ_VECTOR,
|
||||
sink_irq_vector);
|
||||
|
||||
if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
|
||||
intel_dp_handle_test_request(intel_dp);
|
||||
if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
|
||||
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
|
||||
}
|
||||
|
||||
out:
|
||||
intel_dp_power_put(intel_dp, power_domain);
|
||||
return status;
|
||||
@ -4822,6 +4933,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
|
||||
intel_display_power_get(dev_priv, power_domain);
|
||||
|
||||
if (long_hpd) {
|
||||
/* indicate that we need to restart link training */
|
||||
intel_dp->train_set_valid = false;
|
||||
|
||||
if (HAS_PCH_SPLIT(dev)) {
|
||||
if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
|
||||
|
@@ -40,7 +40,9 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 	int bpp, i;
 	int lane_count, slots, rate;
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-	struct intel_connector *found = NULL;
+	struct drm_connector *drm_connector;
+	struct intel_connector *connector, *found = NULL;
+	struct drm_connector_state *connector_state;
 	int mst_pbn;
 
 	pipe_config->dp_encoder_is_mst = true;
@@ -70,12 +72,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 
 	state = pipe_config->base.state;
 
-	for (i = 0; i < state->num_connector; i++) {
-		if (!state->connectors[i])
-			continue;
+	for_each_connector_in_state(state, drm_connector, connector_state, i) {
+		connector = to_intel_connector(drm_connector);
 
-		if (state->connector_states[i]->best_encoder == &encoder->base) {
-			found = to_intel_connector(state->connectors[i]);
+		if (connector_state->best_encoder == &encoder->base) {
+			found = connector;
 			break;
 		}
 	}
@ -248,12 +248,6 @@ struct intel_plane_state {
|
||||
struct drm_rect clip;
|
||||
bool visible;
|
||||
|
||||
/*
|
||||
* used only for sprite planes to determine when to implicitly
|
||||
* enable/disable the primary plane
|
||||
*/
|
||||
bool hides_primary;
|
||||
|
||||
/*
|
||||
* scaler_id
|
||||
* = -1 : not using a scaler
|
||||
@@ -285,11 +279,11 @@ struct intel_initial_plane_config {
 #define SKL_MIN_SRC_W 8
 #define SKL_MAX_SRC_W 4096
 #define SKL_MIN_SRC_H 8
-#define SKL_MAX_SRC_H 2304
+#define SKL_MAX_SRC_H 4096
 #define SKL_MIN_DST_W 8
 #define SKL_MAX_DST_W 4096
 #define SKL_MIN_DST_H 8
-#define SKL_MAX_DST_H 2304
+#define SKL_MAX_DST_H 4096
 
 struct intel_scaler {
 	int id;
@ -513,7 +507,6 @@ struct intel_crtc {
|
||||
*/
|
||||
bool active;
|
||||
unsigned long enabled_power_domains;
|
||||
bool primary_enabled; /* is the primary plane (partially) visible? */
|
||||
bool lowfreq_avail;
|
||||
struct intel_overlay *overlay;
|
||||
struct intel_unpin_work *unpin_work;
|
||||
@ -600,7 +593,7 @@ struct intel_plane {
|
||||
uint32_t x, uint32_t y,
|
||||
uint32_t src_w, uint32_t src_h);
|
||||
void (*disable_plane)(struct drm_plane *plane,
|
||||
struct drm_crtc *crtc);
|
||||
struct drm_crtc *crtc, bool force);
|
||||
int (*check_plane)(struct drm_plane *plane,
|
||||
struct intel_plane_state *state);
|
||||
void (*commit_plane)(struct drm_plane *plane,
|
||||
@ -736,9 +729,12 @@ struct intel_dp {
|
||||
bool has_aux_irq,
|
||||
int send_bytes,
|
||||
uint32_t aux_clock_divider);
|
||||
bool train_set_valid;
|
||||
|
||||
/* Displayport compliance testing */
|
||||
unsigned long compliance_test_type;
|
||||
unsigned long compliance_test_data;
|
||||
bool compliance_test_active;
|
||||
};
|
||||
|
||||
struct intel_digital_port {
|
||||
@ -817,15 +813,6 @@ struct intel_unpin_work {
|
||||
bool enable_stall_check;
|
||||
};
|
||||
|
||||
struct intel_set_config {
|
||||
struct drm_encoder **save_connector_encoders;
|
||||
struct drm_crtc **save_encoder_crtcs;
|
||||
bool *save_crtc_enabled;
|
||||
|
||||
bool fb_changed;
|
||||
bool mode_changed;
|
||||
};
|
||||
|
||||
struct intel_load_detect_pipe {
|
||||
struct drm_framebuffer *release_fb;
|
||||
bool load_detect_temp;
|
||||
@ -998,6 +985,7 @@ void intel_mark_busy(struct drm_device *dev);
|
||||
void intel_mark_idle(struct drm_device *dev);
|
||||
void intel_crtc_restore_mode(struct drm_crtc *crtc);
|
||||
void intel_crtc_control(struct drm_crtc *crtc, bool enable);
|
||||
void intel_crtc_reset(struct intel_crtc *crtc);
|
||||
void intel_crtc_update_dpms(struct drm_crtc *crtc);
|
||||
void intel_encoder_destroy(struct drm_encoder *encoder);
|
||||
int intel_connector_init(struct intel_connector *);
|
||||
@ -1025,7 +1013,8 @@ intel_wait_for_vblank(struct drm_device *dev, int pipe)
|
||||
}
|
||||
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
|
||||
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
|
||||
struct intel_digital_port *dport);
|
||||
struct intel_digital_port *dport,
|
||||
unsigned int expected_mask);
|
||||
bool intel_get_load_detect_pipe(struct drm_connector *connector,
|
||||
struct drm_display_mode *mode,
|
||||
struct intel_load_detect_pipe *old,
|
||||
@ -1145,9 +1134,22 @@ void skl_detach_scalers(struct intel_crtc *intel_crtc);
|
||||
int skl_update_scaler_users(struct intel_crtc *intel_crtc,
|
||||
struct intel_crtc_state *crtc_state, struct intel_plane *intel_plane,
|
||||
struct intel_plane_state *plane_state, int force_detach);
|
||||
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
|
||||
|
||||
unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
|
||||
struct drm_i915_gem_object *obj);
|
||||
u32 skl_plane_ctl_format(uint32_t pixel_format);
|
||||
u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
|
||||
u32 skl_plane_ctl_rotation(unsigned int rotation);
|
||||
|
||||
/* intel_csr.c */
|
||||
void intel_csr_ucode_init(struct drm_device *dev);
|
||||
enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv);
|
||||
void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
|
||||
enum csr_state state);
|
||||
void intel_csr_load_program(struct drm_device *dev);
|
||||
void intel_csr_ucode_fini(struct drm_device *dev);
|
||||
void assert_csr_loaded(struct drm_i915_private *dev_priv);
|
||||
|
||||
/* intel_dp.c */
|
||||
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
|
||||
@ -1380,8 +1382,6 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
|
||||
bool intel_pipe_update_start(struct intel_crtc *crtc,
|
||||
uint32_t *start_vbl_count);
|
||||
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
|
||||
void intel_post_enable_primary(struct drm_crtc *crtc);
|
||||
void intel_pre_disable_primary(struct drm_crtc *crtc);
|
||||
|
||||
/* intel_tv.c */
|
||||
void intel_tv_init(struct drm_device *dev);
|
||||
@@ -1406,7 +1406,7 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
 	struct drm_crtc_state *crtc_state;
 	crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
 	if (IS_ERR(crtc_state))
-		return ERR_PTR(PTR_ERR(crtc_state));
+		return ERR_CAST(crtc_state);
 
 	return to_intel_crtc_state(crtc_state);
 }
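The hunk above replaces ERR_PTR(PTR_ERR(crtc_state)) with ERR_CAST(crtc_state), the <linux/err.h> helper for propagating an error pointer across pointer types. A minimal sketch of the pattern, using hypothetical foo/bar types and helpers purely for illustration:

#include <linux/err.h>

struct foo;
struct bar;

struct foo *get_foo(void);		/* hypothetical; may return ERR_PTR(-E...) */
struct bar *foo_to_bar(struct foo *f);	/* hypothetical conversion */

static struct bar *get_bar(void)
{
	struct foo *f = get_foo();

	if (IS_ERR(f))
		return ERR_CAST(f);	/* keep the errno, change the pointer type */

	return foo_to_bar(f);
}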
@@ -457,7 +457,7 @@ static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
 		tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 
 		if (intel_crtc_active(tmp_crtc) &&
-		    to_intel_crtc(tmp_crtc)->primary_enabled) {
+		    to_intel_plane_state(tmp_crtc->primary->state)->visible) {
 			if (one_pipe_only && crtc) {
 				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
 					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
@ -964,6 +964,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
|
||||
struct drm_device *dev = crtc_state->base.crtc->dev;
|
||||
struct drm_atomic_state *state;
|
||||
struct intel_encoder *encoder;
|
||||
struct drm_connector *connector;
|
||||
struct drm_connector_state *connector_state;
|
||||
int count = 0, count_hdmi = 0;
|
||||
int i;
|
||||
@ -973,11 +974,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
|
||||
|
||||
state = crtc_state->base.state;
|
||||
|
||||
for (i = 0; i < state->num_connector; i++) {
|
||||
if (!state->connectors[i])
|
||||
continue;
|
||||
|
||||
connector_state = state->connector_states[i];
|
||||
for_each_connector_in_state(state, connector, connector_state, i) {
|
||||
if (connector_state->crtc != crtc_state->base.crtc)
|
||||
continue;
|
||||
|
||||
@ -1327,7 +1324,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
|
||||
|
||||
intel_enable_hdmi(encoder);
|
||||
|
||||
vlv_wait_port_ready(dev_priv, dport);
|
||||
vlv_wait_port_ready(dev_priv, dport, 0x0);
|
||||
}
|
||||
|
||||
static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
|
||||
@ -1490,7 +1487,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
|
||||
&intel_crtc->config->base.adjusted_mode;
|
||||
enum dpio_channel ch = vlv_dport_to_channel(dport);
|
||||
int pipe = intel_crtc->pipe;
|
||||
int data, i;
|
||||
int data, i, stagger;
|
||||
u32 val;
|
||||
|
||||
mutex_lock(&dev_priv->dpio_lock);
|
||||
@ -1530,7 +1527,38 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
|
||||
}
|
||||
|
||||
/* Data lane stagger programming */
|
||||
/* FIXME: Fix up value only after power analysis */
|
||||
if (intel_crtc->config->port_clock > 270000)
|
||||
stagger = 0x18;
|
||||
else if (intel_crtc->config->port_clock > 135000)
|
||||
stagger = 0xd;
|
||||
else if (intel_crtc->config->port_clock > 67500)
|
||||
stagger = 0x7;
|
||||
else if (intel_crtc->config->port_clock > 33750)
|
||||
stagger = 0x4;
|
||||
else
|
||||
stagger = 0x2;
|
||||
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
|
||||
val |= DPIO_TX2_STAGGER_MASK(0x1f);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
|
||||
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
|
||||
val |= DPIO_TX2_STAGGER_MASK(0x1f);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
|
||||
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
|
||||
DPIO_LANESTAGGER_STRAP(stagger) |
|
||||
DPIO_LANESTAGGER_STRAP_OVRD |
|
||||
DPIO_TX1_STAGGER_MASK(0x1f) |
|
||||
DPIO_TX1_STAGGER_MULT(6) |
|
||||
DPIO_TX2_STAGGER_MULT(0));
|
||||
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
|
||||
DPIO_LANESTAGGER_STRAP(stagger) |
|
||||
DPIO_LANESTAGGER_STRAP_OVRD |
|
||||
DPIO_TX1_STAGGER_MASK(0x1f) |
|
||||
DPIO_TX1_STAGGER_MULT(7) |
|
||||
DPIO_TX2_STAGGER_MULT(5));
|
||||
|
||||
/* Clear calc init */
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
|
||||
@ -1613,7 +1641,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
|
||||
|
||||
intel_enable_hdmi(encoder);
|
||||
|
||||
vlv_wait_port_ready(dev_priv, dport);
|
||||
vlv_wait_port_ready(dev_priv, dport, 0x0);
|
||||
}
|
||||
|
||||
static void intel_hdmi_destroy(struct drm_connector *connector)
|
||||
|
@@ -1895,10 +1895,9 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 	context_size = round_up(get_lr_context_size(ring), 4096);
 
 	ctx_obj = i915_gem_alloc_object(dev, context_size);
-	if (IS_ERR(ctx_obj)) {
-		ret = PTR_ERR(ctx_obj);
-		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
-		return ret;
+	if (!ctx_obj) {
+		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
+		return -ENOMEM;
 	}
 
 	if (is_global_default_ctx) {
@ -536,6 +536,14 @@ static u32 vlv_get_backlight(struct intel_connector *connector)
|
||||
return _vlv_get_backlight(dev, pipe);
|
||||
}
|
||||
|
||||
static u32 bxt_get_backlight(struct intel_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
return I915_READ(BXT_BLC_PWM_DUTY1);
|
||||
}
|
||||
|
||||
static u32 intel_panel_get_backlight(struct intel_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->base.dev;
|
||||
@ -616,6 +624,14 @@ static void vlv_set_backlight(struct intel_connector *connector, u32 level)
|
||||
I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
|
||||
}
|
||||
|
||||
static void bxt_set_backlight(struct intel_connector *connector, u32 level)
|
||||
{
|
||||
struct drm_device *dev = connector->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
I915_WRITE(BXT_BLC_PWM_DUTY1, level);
|
||||
}
|
||||
|
||||
static void
|
||||
intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
|
||||
{
|
||||
@ -741,6 +757,18 @@ static void vlv_disable_backlight(struct intel_connector *connector)
|
||||
I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
|
||||
}
|
||||
|
||||
static void bxt_disable_backlight(struct intel_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 tmp;
|
||||
|
||||
intel_panel_actually_set_backlight(connector, 0);
|
||||
|
||||
tmp = I915_READ(BXT_BLC_PWM_CTL1);
|
||||
I915_WRITE(BXT_BLC_PWM_CTL1, tmp & ~BXT_BLC_PWM_ENABLE);
|
||||
}
|
||||
|
||||
void intel_panel_disable_backlight(struct intel_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->base.dev;
|
||||
@ -947,6 +975,33 @@ static void vlv_enable_backlight(struct intel_connector *connector)
|
||||
I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
|
||||
}
|
||||
|
||||
static void bxt_enable_backlight(struct intel_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_panel *panel = &connector->panel;
|
||||
u32 pwm_ctl;
|
||||
|
||||
pwm_ctl = I915_READ(BXT_BLC_PWM_CTL1);
|
||||
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
|
||||
DRM_DEBUG_KMS("backlight already enabled\n");
|
||||
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
|
||||
I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl);
|
||||
}
|
||||
|
||||
I915_WRITE(BXT_BLC_PWM_FREQ1, panel->backlight.max);
|
||||
|
||||
intel_panel_actually_set_backlight(connector, panel->backlight.level);
|
||||
|
||||
pwm_ctl = 0;
|
||||
if (panel->backlight.active_low_pwm)
|
||||
pwm_ctl |= BXT_BLC_PWM_POLARITY;
|
||||
|
||||
I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl);
|
||||
POSTING_READ(BXT_BLC_PWM_CTL1);
|
||||
I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl | BXT_BLC_PWM_ENABLE);
|
||||
}
|
||||
|
||||
void intel_panel_enable_backlight(struct intel_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->base.dev;
|
||||
@ -1299,6 +1354,30 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
|
||||
{
|
||||
struct drm_device *dev = connector->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_panel *panel = &connector->panel;
|
||||
u32 pwm_ctl, val;
|
||||
|
||||
pwm_ctl = I915_READ(BXT_BLC_PWM_CTL1);
|
||||
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
|
||||
|
||||
panel->backlight.max = I915_READ(BXT_BLC_PWM_FREQ1);
|
||||
if (!panel->backlight.max)
|
||||
return -ENODEV;
|
||||
|
||||
val = bxt_get_backlight(connector);
|
||||
panel->backlight.level = intel_panel_compute_brightness(connector, val);
|
||||
|
||||
panel->backlight.enabled = (pwm_ctl & BXT_BLC_PWM_ENABLE) &&
|
||||
panel->backlight.level != 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
@@ -1350,7 +1429,13 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) {
+	if (IS_BROXTON(dev)) {
+		dev_priv->display.setup_backlight = bxt_setup_backlight;
+		dev_priv->display.enable_backlight = bxt_enable_backlight;
+		dev_priv->display.disable_backlight = bxt_disable_backlight;
+		dev_priv->display.set_backlight = bxt_set_backlight;
+		dev_priv->display.get_backlight = bxt_get_backlight;
+	} else if (IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
 		dev_priv->display.setup_backlight = bdw_setup_backlight;
 		dev_priv->display.enable_backlight = bdw_enable_backlight;
 		dev_priv->display.disable_backlight = pch_disable_backlight;
@@ -88,8 +88,7 @@ static void skl_init_clock_gating(struct drm_device *dev)
 
 		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
 		I915_WRITE(FF_SLICE_CS_CHICKEN2,
-			   I915_READ(FF_SLICE_CS_CHICKEN2) |
-			   GEN9_TSG_BARRIER_ACK_DISABLE);
+			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
 	}
 
 	if (INTEL_REVID(dev) <= SKL_REVID_E0)
@ -4295,8 +4294,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
|
||||
if (dev_priv->rps.min_freq_softlimit == 0) {
|
||||
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
|
||||
dev_priv->rps.min_freq_softlimit =
|
||||
/* max(RPe, 450 MHz) */
|
||||
max(dev_priv->rps.efficient_freq, (u8) 9);
|
||||
max_t(int, dev_priv->rps.efficient_freq,
|
||||
intel_freq_opcode(dev_priv, 450));
|
||||
else
|
||||
dev_priv->rps.min_freq_softlimit =
|
||||
dev_priv->rps.min_freq;
|
||||
@ -5082,6 +5081,12 @@ static void cherryview_enable_rps(struct drm_device *dev)
|
||||
GEN6_RP_UP_BUSY_AVG |
|
||||
GEN6_RP_DOWN_IDLE_AVG);
|
||||
|
||||
/* Setting Fixed Bias */
|
||||
val = VLV_OVERRIDE_EN |
|
||||
VLV_SOC_TDP_EN |
|
||||
CHV_BIAS_CPU_50_SOC_50;
|
||||
vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
|
||||
|
||||
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
|
||||
|
||||
/* RPS code assumes GPLL is used */
|
||||
@ -5166,6 +5171,12 @@ static void valleyview_enable_rps(struct drm_device *dev)
|
||||
|
||||
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
|
||||
|
||||
/* Setting Fixed Bias */
|
||||
val = VLV_OVERRIDE_EN |
|
||||
VLV_SOC_TDP_EN |
|
||||
VLV_BIAS_CPU_125_SOC_875;
|
||||
vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
|
||||
|
||||
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
|
||||
|
||||
/* RPS code assumes GPLL is used */
|
||||
|
@ -919,53 +919,45 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
|
||||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
/* WaDisablePartialInstShootdown:skl */
|
||||
/* WaDisablePartialInstShootdown:skl,bxt */
|
||||
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
|
||||
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
|
||||
|
||||
/* Syncing dependencies between camera and graphics */
|
||||
/* Syncing dependencies between camera and graphics:skl,bxt */
|
||||
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
|
||||
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
|
||||
|
||||
if (INTEL_REVID(dev) == SKL_REVID_A0 ||
|
||||
INTEL_REVID(dev) == SKL_REVID_B0) {
|
||||
/* WaDisableDgMirrorFixInHalfSliceChicken5:skl */
|
||||
if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 ||
|
||||
INTEL_REVID(dev) == SKL_REVID_B0)) ||
|
||||
(IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
|
||||
/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
|
||||
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
|
||||
GEN9_DG_MIRROR_FIX_ENABLE);
|
||||
}
|
||||
|
||||
if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) {
|
||||
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl */
|
||||
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
|
||||
(IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
|
||||
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
|
||||
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
|
||||
GEN9_RHWO_OPTIMIZATION_DISABLE);
|
||||
WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0,
|
||||
DISABLE_PIXEL_MASK_CAMMING);
|
||||
}
|
||||
|
||||
if (INTEL_REVID(dev) >= SKL_REVID_C0) {
|
||||
/* WaEnableYV12BugFixInHalfSliceChicken7:skl */
|
||||
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
|
||||
IS_BROXTON(dev)) {
|
||||
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
|
||||
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
|
||||
GEN9_ENABLE_YV12_BUGFIX);
|
||||
}
|
||||
|
||||
if (INTEL_REVID(dev) <= SKL_REVID_D0) {
|
||||
/*
|
||||
*Use Force Non-Coherent whenever executing a 3D context. This
|
||||
* is a workaround for a possible hang in the unlikely event
|
||||
* a TLB invalidation occurs during a PSD flush.
|
||||
*/
|
||||
/* WaForceEnableNonCoherent:skl */
|
||||
WA_SET_BIT_MASKED(HDC_CHICKEN0,
|
||||
HDC_FORCE_NON_COHERENT);
|
||||
}
|
||||
|
||||
/* Wa4x4STCOptimizationDisable:skl */
|
||||
/* Wa4x4STCOptimizationDisable:skl,bxt */
|
||||
WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
|
||||
|
||||
/* WaDisablePartialResolveInVc:skl */
|
||||
/* WaDisablePartialResolveInVc:skl,bxt */
|
||||
WA_SET_BIT_MASKED(CACHE_MODE_1, GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
|
||||
|
||||
/* WaCcsTlbPrefetchDisable:skl */
|
||||
/* WaCcsTlbPrefetchDisable:skl,bxt */
|
||||
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
|
||||
GEN9_CCS_TLB_PREFETCH_ENABLE);
|
||||
|
||||
@ -1036,13 +1028,42 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
|
||||
WA_SET_BIT_MASKED(HIZ_CHICKEN,
|
||||
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
|
||||
|
||||
if (INTEL_REVID(dev) <= SKL_REVID_D0) {
|
||||
/*
|
||||
*Use Force Non-Coherent whenever executing a 3D context. This
|
||||
* is a workaround for a possible hang in the unlikely event
|
||||
* a TLB invalidation occurs during a PSD flush.
|
||||
*/
|
||||
/* WaForceEnableNonCoherent:skl */
|
||||
WA_SET_BIT_MASKED(HDC_CHICKEN0,
|
||||
HDC_FORCE_NON_COHERENT);
|
||||
}
|
||||
|
||||
return skl_tune_iz_hashing(ring);
|
||||
}
|
||||
|
||||
static int bxt_init_workarounds(struct intel_engine_cs *ring)
|
||||
{
|
||||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
gen9_init_workarounds(ring);
|
||||
|
||||
/* WaDisableThreadStallDopClockGating:bxt */
|
||||
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
|
||||
STALL_DOP_GATING_DISABLE);
|
||||
|
||||
/* WaDisableSbeCacheDispatchPortSharing:bxt */
|
||||
if (INTEL_REVID(dev) <= BXT_REVID_B0) {
|
||||
WA_SET_BIT_MASKED(
|
||||
GEN7_HALF_SLICE_CHICKEN1,
|
||||
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
|
||||
}
|
||||
|
||||
/* WaForceContextSaveRestoreNonCoherent:bxt */
|
||||
WA_SET_BIT_MASKED(HDC_CHICKEN0,
|
||||
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
|
||||
|
||||
return 0;
|
||||
}

@ -49,6 +49,9 @@
* present for a given platform.
*/

#define GEN9_ENABLE_DC5(dev) 0
#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev)

#define for_each_power_well(i, power_well, domain_mask, power_domains) \
for (i = 0; \
i < (power_domains)->power_well_count && \
@ -62,6 +65,9 @@
i--) \
if ((power_well)->domains & (domain_mask))

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
int power_well_id);

/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@ -308,7 +314,9 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS ( \
SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS)
SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
BIT(POWER_DOMAIN_PLLS) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
@ -417,9 +425,148 @@ void bxt_disable_dc9(struct drm_i915_private *dev_priv)
POSTING_READ(DC_STATE_EN);
}

static void gen9_set_dc_state_debugmask_memory_up(
struct drm_i915_private *dev_priv)
{
uint32_t val;

/* The below bit doesn't need to be cleared ever afterwards */
val = I915_READ(DC_STATE_DEBUG);
if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
I915_WRITE(DC_STATE_DEBUG, val);
POSTING_READ(DC_STATE_DEBUG);
}
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
SKL_DISP_PW_2);

WARN(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
WARN(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN(pg2_enabled, "PG2 not disabled to enable DC5.\n");

WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
"DC5 already programmed to be enabled.\n");
WARN(dev_priv->pm.suspended,
"DC5 cannot be enabled, if platform is runtime-suspended.\n");

assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
{
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
SKL_DISP_PW_2);
/*
* During initialization, the firmware may not be loaded yet.
* We still want to make sure that the DC enabling flag is cleared.
*/
if (dev_priv->power_domains.initializing)
return;

WARN(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
WARN(dev_priv->pm.suspended,
"Disabling of DC5 while platform is runtime-suspended should never happen.\n");
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
uint32_t val;

assert_can_enable_dc5(dev_priv);

DRM_DEBUG_KMS("Enabling DC5\n");

gen9_set_dc_state_debugmask_memory_up(dev_priv);

val = I915_READ(DC_STATE_EN);
val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
val |= DC_STATE_EN_UPTO_DC5;
I915_WRITE(DC_STATE_EN, val);
POSTING_READ(DC_STATE_EN);
}

static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
{
uint32_t val;

assert_can_disable_dc5(dev_priv);

DRM_DEBUG_KMS("Disabling DC5\n");

val = I915_READ(DC_STATE_EN);
val &= ~DC_STATE_EN_UPTO_DC5;
I915_WRITE(DC_STATE_EN, val);
POSTING_READ(DC_STATE_EN);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;

WARN(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
WARN(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Backlight is not disabled.\n");
WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
"DC6 already programmed to be enabled.\n");

assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
{
/*
* During initialization, the firmware may not be loaded yet.
* We still want to make sure that the DC enabling flag is cleared.
*/
if (dev_priv->power_domains.initializing)
return;

assert_csr_loaded(dev_priv);
WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
"DC6 already programmed to be disabled.\n");
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
uint32_t val;

assert_can_enable_dc6(dev_priv);

DRM_DEBUG_KMS("Enabling DC6\n");

gen9_set_dc_state_debugmask_memory_up(dev_priv);

val = I915_READ(DC_STATE_EN);
val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
val |= DC_STATE_EN_UPTO_DC6;
I915_WRITE(DC_STATE_EN, val);
POSTING_READ(DC_STATE_EN);
}

static void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
uint32_t val;

assert_can_disable_dc6(dev_priv);

DRM_DEBUG_KMS("Disabling DC6\n");

val = I915_READ(DC_STATE_EN);
val &= ~DC_STATE_EN_UPTO_DC6;
I915_WRITE(DC_STATE_EN, val);
POSTING_READ(DC_STATE_EN);
}
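
The gen9_enable_dc5()/skl_enable_dc6() paths above share one register-update shape; condensed here into a single hypothetical helper (illustrative only, not part of the commit, and the helper name is made up):

/* Hypothetical condensation of the enable paths above: clear the whole
 * DC5/DC6 field, request the target max DC state, and flush with a posting
 * read.  target_state would be DC_STATE_EN_UPTO_DC5 or DC_STATE_EN_UPTO_DC6;
 * note the disable paths above clear only their own bit instead. */
static void gen9_request_dc_state(struct drm_i915_private *dev_priv,
				  uint32_t target_state)
{
	uint32_t val = I915_READ(DC_STATE_EN);

	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
	val |= target_state;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}
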

static void skl_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
struct drm_device *dev = dev_priv->dev;
uint32_t tmp, fuse_status;
uint32_t req_mask, state_mask;
bool is_enabled, enable_requested, check_fuse_status = false;
@ -459,6 +606,25 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,

if (enable) {
if (!enable_requested) {
WARN((tmp & state_mask) &&
!I915_READ(HSW_PWR_WELL_BIOS),
"Invalid for power well status to be enabled, unless done by the BIOS, \
when request is to disable!\n");
if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
power_well->data == SKL_DISP_PW_2) {
if (SKL_ENABLE_DC6(dev)) {
skl_disable_dc6(dev_priv);
/*
* DDI buffer programming unnecessary during driver-load/resume
* as it's already done during modeset initialization then.
* It's also invalid here as encoder list is still uninitialized.
*/
if (!dev_priv->power_domains.initializing)
intel_prepare_ddi(dev);
} else {
gen9_disable_dc5(dev_priv);
}
}
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
}

@ -475,6 +641,25 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Disabling %s\n", power_well->name);

if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
power_well->data == SKL_DISP_PW_2) {
enum csr_state state;
/* TODO: wait for a completion event or
* similar here instead of busy
* waiting using wait_for function.
*/
wait_for((state = intel_csr_load_status_get(dev_priv)) !=
FW_UNINITIALIZED, 1000);
if (state != FW_LOADED)
DRM_ERROR("CSR firmware not ready (%d)\n",
state);
else
if (SKL_ENABLE_DC6(dev))
skl_enable_dc6(dev_priv);
else
gen9_enable_dc5(dev_priv);
}
}
}

@ -764,8 +949,8 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
DRM_ERROR("Display PHY %d is not power up\n", phy);

I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
PHY_COM_LANE_RESET_DEASSERT(phy));
dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
@ -785,8 +970,8 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
assert_pll_disabled(dev_priv, PIPE_C);
}

I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
~PHY_COM_LANE_RESET_DEASSERT(phy));
dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

vlv_set_power_well(dev_priv, power_well, false);
}
@ -1279,23 +1464,13 @@ static struct i915_power_well chv_power_wells[] = {
#endif
{
.name = "dpio-common-bc",
/*
* XXX: cmnreset for one PHY seems to disturb the other.
* As a workaround keep both powered on at the same
* time for now.
*/
.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &chv_dpio_cmn_power_well_ops,
},
{
.name = "dpio-common-d",
/*
* XXX: cmnreset for one PHY seems to disturb the other.
* As a workaround keep both powered on at the same
* time for now.
*/
.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_D,
.ops = &chv_dpio_cmn_power_well_ops,
},
@ -1346,7 +1521,7 @@ static struct i915_power_well chv_power_wells[] = {
};

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
enum punit_power_well power_well_id)
int power_well_id)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
@ -1360,6 +1535,18 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
return NULL;
}

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
int power_well_id)
{
struct i915_power_well *power_well;
bool ret;

power_well = lookup_power_well(dev_priv, power_well_id);
ret = power_well->ops->is_enabled(dev_priv, power_well);

return ret;
}

static struct i915_power_well skl_power_wells[] = {
{
.name = "always-on",
@ -1522,6 +1709,30 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}

static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn_bc =
lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
struct i915_power_well *cmn_d =
lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

/*
* DISPLAY_PHY_CONTROL can get corrupted if read. As a
* workaround never ever read DISPLAY_PHY_CONTROL, and
* instead maintain a shadow copy ourselves. Use the actual
* power well state to reconstruct the expected initial
* value.
*/
dev_priv->chv_phy_control =
PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH0) |
PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH1) |
PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY1, DPIO_CH0);
if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc))
dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
if (cmn_d->ops->is_enabled(dev_priv, cmn_d))
dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
}
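
Because chv_phy_control_init() above establishes dev_priv->chv_phy_control as a shadow of DISPLAY_PHY_CONTROL that must never be read back, later updates follow a write-through-shadow pattern. A hedged sketch of that pattern (hypothetical helper, not in the commit):

/* Hypothetical sketch of the write-through-shadow pattern implied by the
 * comment above: mutate the cached value, write it out, and never read the
 * (corruptible) register itself. */
static void chv_set_phy_control(struct drm_i915_private *dev_priv,
				uint32_t set, uint32_t clear)
{
	dev_priv->chv_phy_control &= ~clear;
	dev_priv->chv_phy_control |= set;
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
}
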

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn =
@ -1564,7 +1775,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)

power_domains->initializing = true;

if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
if (IS_CHERRYVIEW(dev)) {
chv_phy_control_init(dev_priv);
} else if (IS_VALLEYVIEW(dev)) {
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(dev_priv);
mutex_unlock(&power_domains->lock);

@ -242,7 +242,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)

if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
I915_WRITE(intel_sdvo->sdvo_reg, val);
I915_READ(intel_sdvo->sdvo_reg);
POSTING_READ(intel_sdvo->sdvo_reg);
return;
}

@ -259,9 +259,9 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
for (i = 0; i < 2; i++)
{
I915_WRITE(GEN3_SDVOB, bval);
I915_READ(GEN3_SDVOB);
POSTING_READ(GEN3_SDVOB);
I915_WRITE(GEN3_SDVOC, cval);
I915_READ(GEN3_SDVOC);
POSTING_READ(GEN3_SDVOC);
}
}

@ -33,6 +33,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_atomic.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
@ -165,17 +166,6 @@ void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
pipe_name(pipe), start_vbl_count, end_vbl_count);
}

static void intel_update_primary_plane(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
int reg = DSPCNTR(crtc->plane);

if (crtc->primary_enabled)
I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
else
I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
}

static void
skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@ -197,80 +187,17 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
u32 tile_height, plane_offset, plane_size;
unsigned int rotation;
int x_offset, y_offset;
struct intel_crtc_state *crtc_state = to_intel_crtc(crtc)->config;
int scaler_id;

plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE;

switch (fb->pixel_format) {
case DRM_FORMAT_RGB565:
plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
break;
case DRM_FORMAT_XBGR8888:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
break;
case DRM_FORMAT_XRGB8888:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
break;
/*
* XXX: For ARBG/ABGR formats we default to expecting scanout buffers
* to be already pre-multiplied. We need to add a knob (or a different
* DRM_FORMAT) for user-space to configure that.
*/
case DRM_FORMAT_ABGR8888:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
PLANE_CTL_ORDER_RGBX |
PLANE_CTL_ALPHA_SW_PREMULTIPLY;
break;
case DRM_FORMAT_ARGB8888:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
PLANE_CTL_ALPHA_SW_PREMULTIPLY;
break;
case DRM_FORMAT_YUYV:
plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
break;
case DRM_FORMAT_YVYU:
plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
break;
case DRM_FORMAT_UYVY:
plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
break;
case DRM_FORMAT_VYUY:
plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
break;
default:
BUG();
}

switch (fb->modifier[0]) {
case DRM_FORMAT_MOD_NONE:
break;
case I915_FORMAT_MOD_X_TILED:
plane_ctl |= PLANE_CTL_TILED_X;
break;
case I915_FORMAT_MOD_Y_TILED:
plane_ctl |= PLANE_CTL_TILED_Y;
break;
case I915_FORMAT_MOD_Yf_TILED:
plane_ctl |= PLANE_CTL_TILED_YF;
break;
default:
MISSING_CASE(fb->modifier[0]);
}
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);

rotation = drm_plane->state->rotation;
switch (rotation) {
case BIT(DRM_ROTATE_90):
plane_ctl |= PLANE_CTL_ROTATE_90;
break;

case BIT(DRM_ROTATE_180):
plane_ctl |= PLANE_CTL_ROTATE_180;
break;

case BIT(DRM_ROTATE_270):
plane_ctl |= PLANE_CTL_ROTATE_270;
break;
}
plane_ctl |= skl_plane_ctl_rotation(rotation);

intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
pixel_size, true,
@ -279,6 +206,8 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
fb->pixel_format);

scaler_id = to_intel_plane_state(drm_plane->state)->scaler_id;

/* Sizes are 0 based */
src_w--;
src_h--;
@ -316,19 +245,38 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,

I915_WRITE(PLANE_OFFSET(pipe, plane), plane_offset);
I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
I915_WRITE(PLANE_SIZE(pipe, plane), plane_size);

/* program plane scaler */
if (scaler_id >= 0) {
uint32_t ps_ctrl = 0;

DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
PS_PLANE_SEL(plane));
ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) |
crtc_state->scaler_state.scalers[scaler_id].mode;
I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
((crtc_w + 1) << 16)|(crtc_h + 1));

I915_WRITE(PLANE_POS(pipe, plane), 0);
} else {
I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
}

I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
I915_WRITE(PLANE_SURF(pipe, plane), surf_addr);
POSTING_READ(PLANE_SURF(pipe, plane));
}
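
The scaler branch in skl_update_plane() above can be read as: when a scaler is attached, the plane itself is parked at 0,0 and the scaler window carries the CRTC position and size instead. A hypothetical condensation of just that choice (not code from the commit):

/* Hypothetical condensation of the scaler-vs-direct positioning logic above.
 * With a scaler the window registers take the CRTC rectangle and the plane
 * position is zeroed; without one the plane position is written directly. */
static void skl_program_plane_window(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int plane, int scaler_id,
				     int crtc_x, int crtc_y,
				     int crtc_w, int crtc_h)
{
	if (scaler_id >= 0) {
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id),
			   (crtc_x << 16) | crtc_y);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
			   ((crtc_w + 1) << 16) | (crtc_h + 1));
		I915_WRITE(PLANE_POS(pipe, plane), 0);
	} else {
		I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
	}
}
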

static void
skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
{
struct drm_device *dev = drm_plane->dev;
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
struct intel_plane *intel_plane = to_intel_plane(dplane);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;

@ -338,7 +286,7 @@ skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
I915_WRITE(PLANE_SURF(pipe, plane), 0);
POSTING_READ(PLANE_SURF(pipe, plane));

intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}

static void
@ -479,8 +427,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
}

intel_update_primary_plane(intel_crtc);

if (key->flags) {
I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
@ -512,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
}

static void
vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
{
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -521,8 +467,6 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;

intel_update_primary_plane(intel_crtc);

I915_WRITE(SPCNTR(pipe, plane), 0);

/* Activate double buffered register update */
@ -626,8 +570,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
}

intel_update_primary_plane(intel_crtc);

if (key->flags) {
I915_WRITE(SPRKEYVAL(pipe), key->min_value);
I915_WRITE(SPRKEYMAX(pipe), key->max_value);
@ -662,7 +604,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}

static void
ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -670,8 +612,6 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;

intel_update_primary_plane(intel_crtc);

I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
if (intel_plane->can_scale)
@ -766,8 +706,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
}

intel_update_primary_plane(intel_crtc);

if (key->flags) {
I915_WRITE(DVSKEYVAL(pipe), key->min_value);
I915_WRITE(DVSKEYMAX(pipe), key->max_value);
@ -797,7 +735,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}

static void
ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -805,8 +743,6 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;

intel_update_primary_plane(intel_crtc);

I915_WRITE(DVSCNTR(pipe), 0);
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
@ -817,84 +753,13 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
}

/**
* intel_post_enable_primary - Perform operations after enabling primary plane
* @crtc: the CRTC whose primary plane was just enabled
*
* Performs potentially sleeping operations that must be done after the primary
* plane is enabled, such as updating FBC and IPS. Note that this may be
* called due to an explicit primary plane update, or due to an implicit
* re-enable that is caused when a sprite plane is updated to no longer
* completely hide the primary plane.
*/
void
intel_post_enable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

/*
* BDW signals flip done immediately if the plane
* is disabled, even if the plane enable is already
* armed to occur at the next vblank :(
*/
if (IS_BROADWELL(dev))
intel_wait_for_vblank(dev, intel_crtc->pipe);

/*
* FIXME IPS should be fine as long as one plane is
* enabled, but in practice it seems to have problems
* when going from primary only to sprite only and vice
* versa.
*/
hsw_enable_ips(intel_crtc);

mutex_lock(&dev->struct_mutex);
intel_fbc_update(dev);
mutex_unlock(&dev->struct_mutex);
}

/**
* intel_pre_disable_primary - Perform operations before disabling primary plane
* @crtc: the CRTC whose primary plane is to be disabled
*
* Performs potentially sleeping operations that must be done before the
* primary plane is enabled, such as updating FBC and IPS. Note that this may
* be called due to an explicit primary plane update, or due to an implicit
* disable that is caused when a sprite plane completely hides the primary
* plane.
*/
void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

mutex_lock(&dev->struct_mutex);
if (dev_priv->fbc.crtc == intel_crtc)
intel_fbc_disable(dev);
mutex_unlock(&dev->struct_mutex);

/*
* FIXME IPS should be fine as long as one plane is
* enabled, but in practice it seems to have problems
* when going from primary only to sprite only and vice
* versa.
*/
hsw_disable_ips(intel_crtc);
}

static bool colorkey_enabled(struct intel_plane *intel_plane)
{
return intel_plane->ckey.flags != I915_SET_COLORKEY_NONE;
}

static int
intel_check_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_device *dev = plane->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
struct intel_crtc_state *crtc_state;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->base.fb;
int crtc_x, crtc_y;
@ -906,8 +771,11 @@ intel_check_sprite_plane(struct drm_plane *plane,
int hscale, vscale;
int max_scale, min_scale;
int pixel_size;
int ret;

intel_crtc = intel_crtc ? intel_crtc : to_intel_crtc(plane->crtc);
crtc_state = state->base.state ?
intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;

if (!fb) {
state->visible = false;
@ -934,6 +802,11 @@ intel_check_sprite_plane(struct drm_plane *plane,
max_scale = intel_plane->max_downscale << 16;
min_scale = intel_plane->can_scale ? 1 : (1 << 16);

if (INTEL_INFO(dev)->gen >= 9) {
min_scale = 1;
max_scale = skl_max_scale(intel_crtc, crtc_state);
}

drm_rect_rotate(src, fb->width << 16, fb->height << 16,
state->base.rotation);

@ -1029,8 +902,8 @@ intel_check_sprite_plane(struct drm_plane *plane,
width_bytes = ((src_x * pixel_size) & 63) +
src_w * pixel_size;

if (src_w > 2048 || src_h > 2048 ||
width_bytes > 4096 || fb->pitches[0] > 4096) {
if (INTEL_INFO(dev)->gen < 9 && (src_w > 2048 || src_h > 2048 ||
width_bytes > 4096 || fb->pitches[0] > 4096)) {
DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
return -EINVAL;
}
@ -1053,23 +926,10 @@ finish:
* If the sprite is completely covering the primary plane,
* we can disable the primary and save power.
*/
state->hides_primary = fb != NULL && drm_rect_equals(dst, clip) &&
!colorkey_enabled(intel_plane);
WARN_ON(state->hides_primary && !state->visible && intel_crtc->active);

if (intel_crtc->active) {
if (intel_crtc->primary_enabled == state->hides_primary)
intel_crtc->atomic.wait_for_flips = true;

if (intel_crtc->primary_enabled && state->hides_primary)
intel_crtc->atomic.pre_disable_primary = true;

intel_crtc->atomic.fb_bits |=
INTEL_FRONTBUFFER_SPRITE(intel_crtc->pipe);

if (!intel_crtc->primary_enabled && !state->hides_primary)
intel_crtc->atomic.post_enable_primary = true;

if (intel_wm_need_update(plane, &state->base))
intel_crtc->atomic.update_wm = true;

@ -1084,6 +944,13 @@ finish:
}
}

if (INTEL_INFO(dev)->gen >= 9) {
ret = skl_update_scaler_users(intel_crtc, crtc_state, intel_plane,
state, 0);
if (ret)
return ret;
}

return 0;
}

@ -1105,8 +972,6 @@ intel_commit_sprite_plane(struct drm_plane *plane,
plane->fb = fb;

if (intel_crtc->active) {
intel_crtc->primary_enabled = !state->hides_primary;

if (state->visible) {
crtc_x = state->dst.x1;
crtc_y = state->dst.y1;
@ -1120,7 +985,7 @@ intel_commit_sprite_plane(struct drm_plane *plane,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
} else {
intel_plane->disable_plane(plane, crtc);
intel_plane->disable_plane(plane, crtc, false);
}
}
}
@ -1150,6 +1015,16 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
}

intel_plane = to_intel_plane(plane);

if (INTEL_INFO(dev)->gen >= 9) {
/* plane scaling and colorkey are mutually exclusive */
if (to_intel_plane_state(plane->state)->scaler_id >= 0) {
DRM_ERROR("colorkey not allowed with scaler\n");
ret = -EINVAL;
goto out_unlock;
}
}

intel_plane->ckey = *set;

/*
@ -1286,12 +1161,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
}
break;
case 9:
/*
* FIXME: Skylake planes can be scaled (with some restrictions),
* but this is for another time.
*/
intel_plane->can_scale = false;
intel_plane->max_downscale = 1;
intel_plane->can_scale = true;
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
state->scaler_id = -1;

@ -647,6 +647,7 @@ struct drm_encoder {
* @audio_latency: audio latency info from ELD, if found
* @null_edid_counter: track sinks that give us all zeros for the EDID
* @bad_edid_counter: track sinks that give us an EDID with invalid checksum
* @edid_corrupt: indicates whether the last read EDID was corrupt
* @debugfs_entry: debugfs directory for this connector
* @state: current atomic state for this connector
* @has_tile: is this connector connected to a tiled monitor
@ -719,6 +720,11 @@ struct drm_connector {
int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
unsigned bad_edid_counter;

/* Flag for raw EDID header corruption - used in Displayport
* compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
*/
bool edid_corrupt;

struct dentry *debugfs_entry;

struct drm_connector_state *state;
@ -1443,7 +1449,8 @@ extern void drm_set_preferred_mode(struct drm_connector *connector,
int hpref, int vpref);

extern int drm_edid_header_is_valid(const u8 *raw_edid);
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
bool *edid_corrupt);
extern bool drm_edid_is_valid(struct edid *edid);

extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,