Merge remote branch 'intel/drm-intel-fixes' of /ssd/git/drm-next into drm-fixes

* 'intel/drm-intel-fixes' of /ssd/git/drm-next:
  drm/i915/sdvo: Always add a 30ms delay to make SDVO TV detection reliable
  MAINTAINERS: INTEL DRM DRIVERS list (intel-gfx) is subscribers-only
  drm/i915/sdvo: Always fallback to querying the shared DDC line
  drm/i915: Handle pagefaults in execbuffer user relocations
  drm/i915/sdvo: Only enable HDMI encodings if the commandset is supported
  drm/i915: Only save/restore cursor regs if !KMS
  drm/i915: Prevent integer overflow when validating the execbuffer
commit e536fb6f9d
Dave Airlie, 2010-11-26 10:45:03 +10:00
4 changed files with 357 additions and 259 deletions

MAINTAINERS

@@ -2080,7 +2080,7 @@ F: include/drm/
 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
 M: Chris Wilson <chris@chris-wilson.co.uk>
-L: intel-gfx@lists.freedesktop.org
+L: intel-gfx@lists.freedesktop.org (subscribers-only)
 L: dri-devel@lists.freedesktop.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel.git
 S: Supported

drivers/gpu/drm/i915/i915_gem.c

@@ -3254,192 +3254,230 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
     return 0;
 }
 
 /**
  * Pin an object to the GTT and evaluate the relocations landing in it.
  */
 static int
-i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
-                             struct drm_file *file_priv,
-                             struct drm_i915_gem_exec_object2 *entry)
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+                                   struct drm_file *file_priv,
+                                   struct drm_i915_gem_exec_object2 *entry,
+                                   struct drm_i915_gem_relocation_entry *reloc)
 {
     struct drm_device *dev = obj->base.dev;
-    drm_i915_private_t *dev_priv = dev->dev_private;
-    struct drm_i915_gem_relocation_entry __user *user_relocs;
-    struct drm_gem_object *target_obj = NULL;
-    uint32_t target_handle = 0;
-    int i, ret = 0;
+    struct drm_gem_object *target_obj;
+    uint32_t target_offset;
+    int ret = -EINVAL;
 
-    user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
-    for (i = 0; i < entry->relocation_count; i++) {
-        struct drm_i915_gem_relocation_entry reloc;
-        uint32_t target_offset;
+    target_obj = drm_gem_object_lookup(dev, file_priv,
+                                       reloc->target_handle);
+    if (target_obj == NULL)
+        return -ENOENT;
 
-        if (__copy_from_user_inatomic(&reloc,
-                                      user_relocs+i,
-                                      sizeof(reloc))) {
-            ret = -EFAULT;
-            break;
-        }
-
-        if (reloc.target_handle != target_handle) {
-            drm_gem_object_unreference(target_obj);
-
-            target_obj = drm_gem_object_lookup(dev, file_priv,
-                                               reloc.target_handle);
-            if (target_obj == NULL) {
-                ret = -ENOENT;
-                break;
-            }
-
-            target_handle = reloc.target_handle;
-        }
-        target_offset = to_intel_bo(target_obj)->gtt_offset;
+    target_offset = to_intel_bo(target_obj)->gtt_offset;
 #if WATCH_RELOC
-        DRM_INFO("%s: obj %p offset %08x target %d "
-                 "read %08x write %08x gtt %08x "
-                 "presumed %08x delta %08x\n",
-                 __func__,
-                 obj,
-                 (int) reloc.offset,
-                 (int) reloc.target_handle,
-                 (int) reloc.read_domains,
-                 (int) reloc.write_domain,
-                 (int) target_offset,
-                 (int) reloc.presumed_offset,
-                 reloc.delta);
+    DRM_INFO("%s: obj %p offset %08x target %d "
+             "read %08x write %08x gtt %08x "
+             "presumed %08x delta %08x\n",
+             __func__,
+             obj,
+             (int) reloc->offset,
+             (int) reloc->target_handle,
+             (int) reloc->read_domains,
+             (int) reloc->write_domain,
+             (int) target_offset,
+             (int) reloc->presumed_offset,
+             reloc->delta);
 #endif
-
-        /* The target buffer should have appeared before us in the
-         * exec_object list, so it should have a GTT space bound by now.
-         */
-        if (target_offset == 0) {
-            DRM_ERROR("No GTT space found for object %d\n",
-                      reloc.target_handle);
-            ret = -EINVAL;
-            break;
-        }
-
-        /* Validate that the target is in a valid r/w GPU domain */
-        if (reloc.write_domain & (reloc.write_domain - 1)) {
-            DRM_ERROR("reloc with multiple write domains: "
-                      "obj %p target %d offset %d "
-                      "read %08x write %08x",
-                      obj, reloc.target_handle,
-                      (int) reloc.offset,
-                      reloc.read_domains,
-                      reloc.write_domain);
-            ret = -EINVAL;
-            break;
-        }
-        if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
-            reloc.read_domains & I915_GEM_DOMAIN_CPU) {
-            DRM_ERROR("reloc with read/write CPU domains: "
-                      "obj %p target %d offset %d "
-                      "read %08x write %08x",
-                      obj, reloc.target_handle,
-                      (int) reloc.offset,
-                      reloc.read_domains,
-                      reloc.write_domain);
-            ret = -EINVAL;
-            break;
-        }
-        if (reloc.write_domain && target_obj->pending_write_domain &&
-            reloc.write_domain != target_obj->pending_write_domain) {
-            DRM_ERROR("Write domain conflict: "
-                      "obj %p target %d offset %d "
-                      "new %08x old %08x\n",
-                      obj, reloc.target_handle,
-                      (int) reloc.offset,
-                      reloc.write_domain,
-                      target_obj->pending_write_domain);
-            ret = -EINVAL;
-            break;
-        }
-
-        target_obj->pending_read_domains |= reloc.read_domains;
-        target_obj->pending_write_domain |= reloc.write_domain;
-
-        /* If the relocation already has the right value in it, no
-         * more work needs to be done.
-         */
-        if (target_offset == reloc.presumed_offset)
-            continue;
-
-        /* Check that the relocation address is valid... */
-        if (reloc.offset > obj->base.size - 4) {
-            DRM_ERROR("Relocation beyond object bounds: "
-                      "obj %p target %d offset %d size %d.\n",
-                      obj, reloc.target_handle,
-                      (int) reloc.offset, (int) obj->base.size);
-            ret = -EINVAL;
-            break;
-        }
-        if (reloc.offset & 3) {
-            DRM_ERROR("Relocation not 4-byte aligned: "
-                      "obj %p target %d offset %d.\n",
-                      obj, reloc.target_handle,
-                      (int) reloc.offset);
-            ret = -EINVAL;
-            break;
-        }
-
-        /* and points to somewhere within the target object. */
-        if (reloc.delta >= target_obj->size) {
-            DRM_ERROR("Relocation beyond target object bounds: "
-                      "obj %p target %d delta %d size %d.\n",
-                      obj, reloc.target_handle,
-                      (int) reloc.delta, (int) target_obj->size);
-            ret = -EINVAL;
-            break;
-        }
-
-        reloc.delta += target_offset;
-        if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
-            uint32_t page_offset = reloc.offset & ~PAGE_MASK;
-            char *vaddr;
-
-            vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
-            *(uint32_t *)(vaddr + page_offset) = reloc.delta;
-            kunmap_atomic(vaddr);
-        } else {
-            uint32_t __iomem *reloc_entry;
-            void __iomem *reloc_page;
-
-            ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
-            if (ret)
-                break;
-
-            /* Map the page containing the relocation we're going to perform. */
-            reloc.offset += obj->gtt_offset;
-            reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-                                                  reloc.offset & PAGE_MASK);
-            reloc_entry = (uint32_t __iomem *)
-                (reloc_page + (reloc.offset & ~PAGE_MASK));
-            iowrite32(reloc.delta, reloc_entry);
-            io_mapping_unmap_atomic(reloc_page);
-        }
-
-        /* and update the user's relocation entry */
-        reloc.presumed_offset = target_offset;
-        if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
-                                    &reloc.presumed_offset,
-                                    sizeof(reloc.presumed_offset))) {
-            ret = -EFAULT;
-            break;
-        }
-    }
+    /* The target buffer should have appeared before us in the
+     * exec_object list, so it should have a GTT space bound by now.
+     */
+    if (target_offset == 0) {
+        DRM_ERROR("No GTT space found for object %d\n",
+                  reloc->target_handle);
+        goto err;
+    }
+
+    /* Validate that the target is in a valid r/w GPU domain */
+    if (reloc->write_domain & (reloc->write_domain - 1)) {
+        DRM_ERROR("reloc with multiple write domains: "
+                  "obj %p target %d offset %d "
+                  "read %08x write %08x",
+                  obj, reloc->target_handle,
+                  (int) reloc->offset,
+                  reloc->read_domains,
+                  reloc->write_domain);
+        goto err;
+    }
+    if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+        reloc->read_domains & I915_GEM_DOMAIN_CPU) {
+        DRM_ERROR("reloc with read/write CPU domains: "
+                  "obj %p target %d offset %d "
+                  "read %08x write %08x",
+                  obj, reloc->target_handle,
+                  (int) reloc->offset,
+                  reloc->read_domains,
+                  reloc->write_domain);
+        goto err;
+    }
+    if (reloc->write_domain && target_obj->pending_write_domain &&
+        reloc->write_domain != target_obj->pending_write_domain) {
+        DRM_ERROR("Write domain conflict: "
+                  "obj %p target %d offset %d "
+                  "new %08x old %08x\n",
+                  obj, reloc->target_handle,
+                  (int) reloc->offset,
+                  reloc->write_domain,
+                  target_obj->pending_write_domain);
+        goto err;
+    }
+
+    target_obj->pending_read_domains |= reloc->read_domains;
+    target_obj->pending_write_domain |= reloc->write_domain;
+
+    /* If the relocation already has the right value in it, no
+     * more work needs to be done.
+     */
+    if (target_offset == reloc->presumed_offset)
+        goto out;
+
+    /* Check that the relocation address is valid... */
+    if (reloc->offset > obj->base.size - 4) {
+        DRM_ERROR("Relocation beyond object bounds: "
+                  "obj %p target %d offset %d size %d.\n",
+                  obj, reloc->target_handle,
+                  (int) reloc->offset,
+                  (int) obj->base.size);
+        goto err;
+    }
+    if (reloc->offset & 3) {
+        DRM_ERROR("Relocation not 4-byte aligned: "
+                  "obj %p target %d offset %d.\n",
+                  obj, reloc->target_handle,
+                  (int) reloc->offset);
+        goto err;
+    }
+
+    /* and points to somewhere within the target object. */
+    if (reloc->delta >= target_obj->size) {
+        DRM_ERROR("Relocation beyond target object bounds: "
+                  "obj %p target %d delta %d size %d.\n",
+                  obj, reloc->target_handle,
+                  (int) reloc->delta,
+                  (int) target_obj->size);
+        goto err;
+    }
+
+    reloc->delta += target_offset;
+    if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+        uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+        char *vaddr;
+
+        vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+        *(uint32_t *)(vaddr + page_offset) = reloc->delta;
+        kunmap_atomic(vaddr);
+    } else {
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        uint32_t __iomem *reloc_entry;
+        void __iomem *reloc_page;
+
+        ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+        if (ret)
+            goto err;
+
+        /* Map the page containing the relocation we're going to perform. */
+        reloc->offset += obj->gtt_offset;
+        reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+                                              reloc->offset & PAGE_MASK);
+        reloc_entry = (uint32_t __iomem *)
+            (reloc_page + (reloc->offset & ~PAGE_MASK));
+        iowrite32(reloc->delta, reloc_entry);
+        io_mapping_unmap_atomic(reloc_page);
+    }
+
+    /* and update the user's relocation entry */
+    reloc->presumed_offset = target_offset;
+
+out:
+    ret = 0;
+err:
     drm_gem_object_unreference(target_obj);
     return ret;
 }
 static int
-i915_gem_execbuffer_pin(struct drm_device *dev,
-                        struct drm_file *file,
-                        struct drm_gem_object **object_list,
-                        struct drm_i915_gem_exec_object2 *exec_list,
-                        int count)
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+                                    struct drm_file *file_priv,
+                                    struct drm_i915_gem_exec_object2 *entry)
+{
+    struct drm_i915_gem_relocation_entry __user *user_relocs;
+    int i, ret;
+
+    user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+    for (i = 0; i < entry->relocation_count; i++) {
+        struct drm_i915_gem_relocation_entry reloc;
+
+        if (__copy_from_user_inatomic(&reloc,
+                                      user_relocs+i,
+                                      sizeof(reloc)))
+            return -EFAULT;
+
+        ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
+        if (ret)
+            return ret;
+
+        if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
+                                    &reloc.presumed_offset,
+                                    sizeof(reloc.presumed_offset)))
+            return -EFAULT;
+    }
+
+    return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+                                         struct drm_file *file_priv,
+                                         struct drm_i915_gem_exec_object2 *entry,
+                                         struct drm_i915_gem_relocation_entry *relocs)
+{
+    int i, ret;
+
+    for (i = 0; i < entry->relocation_count; i++) {
+        ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
+        if (ret)
+            return ret;
+    }
+
+    return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+                             struct drm_file *file,
+                             struct drm_gem_object **object_list,
+                             struct drm_i915_gem_exec_object2 *exec_list,
+                             int count)
+{
+    int i, ret;
+
+    for (i = 0; i < count; i++) {
+        struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+        obj->base.pending_read_domains = 0;
+        obj->base.pending_write_domain = 0;
+        ret = i915_gem_execbuffer_relocate_object(obj, file,
+                                                  &exec_list[i]);
+        if (ret)
+            return ret;
+    }
+
+    return 0;
+}
+
+static int
+i915_gem_execbuffer_reserve(struct drm_device *dev,
+                            struct drm_file *file,
+                            struct drm_gem_object **object_list,
+                            struct drm_i915_gem_exec_object2 *exec_list,
+                            int count)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
     int ret, i, retry;
@@ -3501,6 +3539,87 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
     return 0;
 }
 
+static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+                                  struct drm_file *file,
+                                  struct drm_gem_object **object_list,
+                                  struct drm_i915_gem_exec_object2 *exec_list,
+                                  int count)
+{
+    struct drm_i915_gem_relocation_entry *reloc;
+    int i, total, ret;
+
+    for (i = 0; i < count; i++) {
+        struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+        obj->in_execbuffer = false;
+    }
+
+    mutex_unlock(&dev->struct_mutex);
+
+    total = 0;
+    for (i = 0; i < count; i++)
+        total += exec_list[i].relocation_count;
+
+    reloc = drm_malloc_ab(total, sizeof(*reloc));
+    if (reloc == NULL) {
+        mutex_lock(&dev->struct_mutex);
+        return -ENOMEM;
+    }
+
+    total = 0;
+    for (i = 0; i < count; i++) {
+        struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+        user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+        if (copy_from_user(reloc+total, user_relocs,
+                           exec_list[i].relocation_count *
+                           sizeof(*reloc))) {
+            ret = -EFAULT;
+            mutex_lock(&dev->struct_mutex);
+            goto err;
+        }
+
+        total += exec_list[i].relocation_count;
+    }
+
+    ret = i915_mutex_lock_interruptible(dev);
+    if (ret) {
+        mutex_lock(&dev->struct_mutex);
+        goto err;
+    }
+
+    ret = i915_gem_execbuffer_reserve(dev, file,
+                                      object_list, exec_list,
+                                      count);
+    if (ret)
+        goto err;
+
+    total = 0;
+    for (i = 0; i < count; i++) {
+        struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+        obj->base.pending_read_domains = 0;
+        obj->base.pending_write_domain = 0;
+        ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
+                                                       &exec_list[i],
+                                                       reloc + total);
+        if (ret)
+            goto err;
+
+        total += exec_list[i].relocation_count;
+    }
+
+    /* Leave the user relocations as are, this is the painfully slow path,
+     * and we want to avoid the complication of dropping the lock whilst
+     * having buffers reserved in the aperture and so causing spurious
+     * ENOSPC for random operations.
+     */
+
+err:
+    drm_free_large(reloc);
+    return ret;
+}
+
 static int
 i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
                                 struct drm_file *file,
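The i915_gem_execbuffer_relocate_slow() path added above is the heart of the pagefault fix: struct_mutex is dropped, every relocation entry is copied into one kernel allocation with a copy that is free to fault, and the relocations are then applied from that kernel copy once the lock is retaken. The userspace sketch below models only that locking shape; the pthread mutex, stage_relocs(), and the struct reloc layout are illustrative stand-ins, not driver code.

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

struct reloc { unsigned offset, delta; };

/* called with struct_mutex held; returns with it held again */
static struct reloc *stage_relocs(const struct reloc *user, size_t total)
{
    struct reloc *kbuf;

    pthread_mutex_unlock(&struct_mutex);  /* the copy may now block/fault */
    kbuf = calloc(total, sizeof(*kbuf));  /* overflow-checked, cf. drm_malloc_ab() */
    if (kbuf)
        memcpy(kbuf, user, total * sizeof(*kbuf));
    pthread_mutex_lock(&struct_mutex);    /* back under the lock */
    return kbuf;  /* relocations are applied from this kernel copy */
}

int main(void)
{
    struct reloc user[4] = { { 0, 16 }, { 8, 32 }, { 16, 48 }, { 24, 64 } };

    pthread_mutex_lock(&struct_mutex);
    free(stage_relocs(user, 4));
    pthread_mutex_unlock(&struct_mutex);
    return 0;
}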
@@ -3630,8 +3749,15 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
     for (i = 0; i < count; i++) {
         char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
-        size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
+        int length; /* limited by fault_in_pages_readable() */
+
+        /* First check for malicious input causing overflow */
+        if (exec[i].relocation_count >
+            INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+            return -EINVAL;
+
+        length = exec[i].relocation_count *
+            sizeof(struct drm_i915_gem_relocation_entry);
 
         if (!access_ok(VERIFY_READ, ptr, length))
             return -EFAULT;
@@ -3774,18 +3900,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
     }
 
     /* Move the objects en-masse into the GTT, evicting if necessary. */
-    ret = i915_gem_execbuffer_pin(dev, file,
-                                  object_list, exec_list,
-                                  args->buffer_count);
+    ret = i915_gem_execbuffer_reserve(dev, file,
+                                      object_list, exec_list,
+                                      args->buffer_count);
     if (ret)
         goto err;
 
     /* The objects are in their final locations, apply the relocations. */
-    for (i = 0; i < args->buffer_count; i++) {
-        struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
-        obj->base.pending_read_domains = 0;
-        obj->base.pending_write_domain = 0;
-        ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
+    ret = i915_gem_execbuffer_relocate(dev, file,
+                                       object_list, exec_list,
+                                       args->buffer_count);
+    if (ret) {
+        if (ret == -EFAULT) {
+            ret = i915_gem_execbuffer_relocate_slow(dev, file,
+                                                    object_list,
+                                                    exec_list,
+                                                    args->buffer_count);
+            BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+        }
         if (ret)
             goto err;
     }
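The rewritten call site tries the fast, non-faulting relocation pass first and falls back to the slow path only when it reports -EFAULT. A plain-C model of that dispatch follows; simulate_fault stands in for an unmapped user page, and all names are illustrative rather than the driver's.

#include <errno.h>
#include <stdio.h>

static int relocate_fast(int simulate_fault)
{
    /* models __copy_from_user_inatomic(): it may not take a page fault,
     * so it fails instead when the user pages are not resident */
    return simulate_fault ? -EFAULT : 0;
}

static int relocate_slow(void)
{
    /* models the slow path: drop the lock, do a faulting copy into a
     * kernel buffer, retake the lock, redo the relocations */
    return 0;
}

static int apply_relocations(int simulate_fault)
{
    int ret = relocate_fast(simulate_fault);
    if (ret == -EFAULT)          /* only a fault triggers the fallback */
        ret = relocate_slow();
    return ret;
}

int main(void)
{
    printf("resident pages: ret=%d\n", apply_relocations(0));
    printf("faulting pages: ret=%d\n", apply_relocations(1));
    return 0;
}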

drivers/gpu/drm/i915/i915_suspend.c

@@ -239,6 +239,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
     if (drm_core_check_feature(dev, DRIVER_MODESET))
         return;
 
+    /* Cursor state */
+    dev_priv->saveCURACNTR = I915_READ(CURACNTR);
+    dev_priv->saveCURAPOS = I915_READ(CURAPOS);
+    dev_priv->saveCURABASE = I915_READ(CURABASE);
+    dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
+    dev_priv->saveCURBPOS = I915_READ(CURBPOS);
+    dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+    if (IS_GEN2(dev))
+        dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+
     if (HAS_PCH_SPLIT(dev)) {
         dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
         dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
@@ -529,6 +539,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
     I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
     I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
 
+    /* Cursor state */
+    I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
+    I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
+    I915_WRITE(CURABASE, dev_priv->saveCURABASE);
+    I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
+    I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
+    I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+    if (IS_GEN2(dev))
+        I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+
     return;
 }
@@ -543,16 +563,6 @@ void i915_save_display(struct drm_device *dev)
     /* Don't save them in KMS mode */
     i915_save_modeset_reg(dev);
 
-    /* Cursor state */
-    dev_priv->saveCURACNTR = I915_READ(CURACNTR);
-    dev_priv->saveCURAPOS = I915_READ(CURAPOS);
-    dev_priv->saveCURABASE = I915_READ(CURABASE);
-    dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
-    dev_priv->saveCURBPOS = I915_READ(CURBPOS);
-    dev_priv->saveCURBBASE = I915_READ(CURBBASE);
-    if (IS_GEN2(dev))
-        dev_priv->saveCURSIZE = I915_READ(CURSIZE);
-
     /* CRT state */
     if (HAS_PCH_SPLIT(dev)) {
         dev_priv->saveADPA = I915_READ(PCH_ADPA);
@@ -657,16 +667,6 @@ void i915_restore_display(struct drm_device *dev)
     /* Don't restore them in KMS mode */
     i915_restore_modeset_reg(dev);
 
-    /* Cursor state */
-    I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
-    I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
-    I915_WRITE(CURABASE, dev_priv->saveCURABASE);
-    I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
-    I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
-    I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
-    if (IS_GEN2(dev))
-        I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
-
     /* CRT state */
     if (HAS_PCH_SPLIT(dev))
         I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
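Net effect of these four hunks: the cursor register save/restore moves from the unconditional display paths into the modeset-reg helpers, which return early under KMS, so cursor state is now touched only when the driver (UMS) owns it. A toy model of that guard, with struct dev and the helper names invented for illustration:

#include <stdbool.h>
#include <stdio.h>

struct dev { bool kms; };

static void save_modeset_reg(struct dev *d)
{
    if (d->kms)
        return;     /* KMS manages this hardware state itself */
    printf("saving cursor + modeset registers\n");
}

static void save_display(struct dev *d)
{
    save_modeset_reg(d);  /* cursor state now lives behind the guard */
    printf("saving CRT and other display state\n");
}

int main(void)
{
    struct dev ums = { .kms = false }, kms = { .kms = true };
    save_display(&ums);   /* saves cursor regs */
    save_display(&kms);   /* skips them */
    return 0;
}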

drivers/gpu/drm/i915/intel_sdvo.c

@@ -107,7 +107,8 @@ struct intel_sdvo {
      * This is set if we treat the device as HDMI, instead of DVI.
      */
     bool is_hdmi;
-    bool has_audio;
+    bool has_hdmi_monitor;
+    bool has_hdmi_audio;
 
     /**
      * This is set if we detect output of sdvo device as LVDS and
@@ -1023,7 +1024,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
     if (!intel_sdvo_set_target_input(intel_sdvo))
         return;
 
-    if (intel_sdvo->is_hdmi &&
+    if (intel_sdvo->has_hdmi_monitor &&
         !intel_sdvo_set_avi_infoframe(intel_sdvo))
         return;
@@ -1063,7 +1064,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
     }
     if (intel_crtc->pipe == 1)
         sdvox |= SDVO_PIPE_B_SELECT;
-    if (intel_sdvo->has_audio)
+    if (intel_sdvo->has_hdmi_audio)
         sdvox |= SDVO_AUDIO_ENABLE;
 
     if (INTEL_INFO(dev)->gen >= 4) {
@@ -1295,55 +1296,14 @@ intel_sdvo_get_edid(struct drm_connector *connector)
     return drm_get_edid(connector, &sdvo->ddc);
 }
 
-static struct drm_connector *
-intel_find_analog_connector(struct drm_device *dev)
-{
-    struct drm_connector *connector;
-    struct intel_sdvo *encoder;
-
-    list_for_each_entry(encoder,
-                        &dev->mode_config.encoder_list,
-                        base.base.head) {
-        if (encoder->base.type == INTEL_OUTPUT_ANALOG) {
-            list_for_each_entry(connector,
-                                &dev->mode_config.connector_list,
-                                head) {
-                if (&encoder->base ==
-                    intel_attached_encoder(connector))
-                    return connector;
-            }
-        }
-    }
-
-    return NULL;
-}
-
-static int
-intel_analog_is_connected(struct drm_device *dev)
-{
-    struct drm_connector *analog_connector;
-
-    analog_connector = intel_find_analog_connector(dev);
-    if (!analog_connector)
-        return false;
-
-    if (analog_connector->funcs->detect(analog_connector, false) ==
-        connector_status_disconnected)
-        return false;
-
-    return true;
-}
-
 /* Mac mini hack -- use the same DDC as the analog connector */
 static struct edid *
 intel_sdvo_get_analog_edid(struct drm_connector *connector)
 {
     struct drm_i915_private *dev_priv = connector->dev->dev_private;
 
-    if (!intel_analog_is_connected(connector->dev))
-        return NULL;
-
-    return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+    return drm_get_edid(connector,
+                        &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
 }
 
 enum drm_connector_status
@@ -1388,8 +1348,10 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
         /* DDC bus is shared, match EDID to connector type */
         if (edid->input & DRM_EDID_INPUT_DIGITAL) {
             status = connector_status_connected;
-            intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
-            intel_sdvo->has_audio = drm_detect_monitor_audio(edid);
+            if (intel_sdvo->is_hdmi) {
+                intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+                intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+            }
         }
         connector->display_info.raw_edid = NULL;
         kfree(edid);
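Because the DDC line is shared between outputs, the EDID itself decides which connector a probe result belongs to: the digital-input flag checked above is bit 7 of byte 20 (the Video Input Definition) in an EDID 1.x block. A standalone sketch of that check, using a fabricated EDID fragment rather than data from real hardware:

#include <stdint.h>
#include <stdio.h>

#define EDID_INPUT_OFFSET  20          /* Video Input Definition byte */
#define EDID_INPUT_DIGITAL (1u << 7)   /* cf. DRM_EDID_INPUT_DIGITAL */

static int edid_is_digital(const uint8_t *edid_block)
{
    return (edid_block[EDID_INPUT_OFFSET] & EDID_INPUT_DIGITAL) != 0;
}

int main(void)
{
    uint8_t edid[128] = { 0 };

    edid[EDID_INPUT_OFFSET] = 0x80;  /* digital sink, e.g. DVI/HDMI */
    printf("digital: %d\n", edid_is_digital(edid));

    edid[EDID_INPUT_OFFSET] = 0x0e;  /* analog signal-level bits only */
    printf("digital: %d\n", edid_is_digital(edid));
    return 0;
}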
@@ -1398,7 +1360,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
     if (status == connector_status_connected) {
         struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
         if (intel_sdvo_connector->force_audio)
-            intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0;
+            intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0;
     }
 
     return status;
@@ -1415,10 +1377,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
     if (!intel_sdvo_write_cmd(intel_sdvo,
                               SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
         return connector_status_unknown;
-    if (intel_sdvo->is_tv) {
-        /* add 30ms delay when the output type is SDVO-TV */
-        mdelay(30);
-    }
+
+    /* add 30ms delay when the output type might be TV */
+    if (intel_sdvo->caps.output_flags &
+        (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
+        mdelay(30);
+
     if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
         return connector_status_unknown;
@@ -1472,8 +1436,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
         edid = intel_sdvo_get_analog_edid(connector);
 
     if (edid != NULL) {
-        drm_mode_connector_update_edid_property(connector, edid);
-        drm_add_edid_modes(connector, edid);
+        if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+            drm_mode_connector_update_edid_property(connector, edid);
+            drm_add_edid_modes(connector, edid);
+        }
         connector->display_info.raw_edid = NULL;
         kfree(edid);
     }
@@ -1713,12 +1679,12 @@ intel_sdvo_set_property(struct drm_connector *connector,
         intel_sdvo_connector->force_audio = val;
 
-        if (val > 0 && intel_sdvo->has_audio)
+        if (val > 0 && intel_sdvo->has_hdmi_audio)
             return 0;
 
-        if (val < 0 && !intel_sdvo->has_audio)
+        if (val < 0 && !intel_sdvo->has_hdmi_audio)
             return 0;
 
-        intel_sdvo->has_audio = val > 0;
+        intel_sdvo->has_hdmi_audio = val > 0;
         goto done;
     }
@@ -2070,6 +2036,8 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
         intel_sdvo_set_colorimetry(intel_sdvo,
                                    SDVO_COLORIMETRY_RGB256);
         connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+        intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
+        intel_sdvo->is_hdmi = true;
     }
     intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
@@ -2077,8 +2045,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
     intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
-    intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
-
     return true;
 }
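These last two hunks implement the "only enable HDMI encodings if the commandset is supported" change: is_hdmi and the HDMI connector properties are now set up inside the branch that has already succeeded in programming HDMI colorimetry, instead of unconditionally at the end of dvi_init. A miniature model of that gating, where struct encoder and probe_hdmi_commandset are invented stand-ins for the SDVO command exchange:

#include <stdbool.h>
#include <stdio.h>

struct encoder {
    bool supports_hdmi_cmds;  /* does the commandset probe succeed? */
    bool is_hdmi;             /* only set once the probe has passed */
};

static bool probe_hdmi_commandset(const struct encoder *e)
{
    return e->supports_hdmi_cmds;  /* stands in for the SDVO commands */
}

static void dvi_init(struct encoder *e)
{
    if (probe_hdmi_commandset(e)) {
        e->is_hdmi = true;         /* HDMI encodings + properties */
        printf("HDMI enabled\n");
    } else {
        printf("plain DVI\n");     /* never issue HDMI commands later */
    }
}

int main(void)
{
    struct encoder hdmi = { .supports_hdmi_cmds = true };
    struct encoder dvi  = { .supports_hdmi_cmds = false };
    dvi_init(&hdmi);
    dvi_init(&dvi);
    return 0;
}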