Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "One of the smaller drm -next pulls in ages!
  Ben (nouveau) has a rewrite in progress but we decided to leave it
  stew for another cycle, so just some fixes from him.
   - radeon: lots of documentation work, fixes, more ring and locking
     changes, pcie gen2, more dp fixes.
   - i915: haswell features, gpu reset fixes, /dev/agpgart removal on
     machines that we never used it on, more VGA/HPD fixes, more DP fixes
   - drm core: cleanups from Daniel, sis 64-bit fixes, range allocator
     colouring.
  but yeah fairly quiet merge this time, probably because I missed half
  of it!"
Trivial add-add conflict in include/linux/pci_regs.h
* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (255 commits)
  drm/nouveau: init vblank requests list
  drm/nv50: extend vblank semaphore to generic dmaobj + offset pair
  drm/nouveau: mark most of our ioctls as deprecated, move to compat layer
  drm/nouveau: move current gpuobj code out of nouveau_object.c
  drm/nouveau/gem: fix object reference leak in a failure path
  drm/nv50: rename INVALID_QUERY_OR_TEXTURE error to INVALID_OPERATION
  drm/nv84: decode PCRYPT errors
  drm/nouveau: dcb table quirk for fdo#50830
  nouveau: Fix alignment requirements on src and dst addresses
  drm/i915: unbreak lastclose for failed driver init
  drm/i915: Set the context before setting up regs for the context.
  drm/i915: constify mode in crtc_mode_fixup
  drm/i915/lvds: ditch ->prepare special case
  drm/i915: dereferencing an error pointer
  drm/i915: fix invalid reference handling of the default ctx obj
  drm/i915: Add -EIO to the list of known errors for __wait_seqno
  drm/i915: Flush the context object from the CPU caches upon switching
  drm/radeon: fix dpms on/off on trinity/aruba v2
  drm/radeon: on hotplug force link training to happen (v2)
  drm/radeon: fix hotplug of DP to DVI|HDMI passive adapters (v2)
  ...
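A note on the biggest refactor in the diff below: the intel-gtt rework changes the GMCH probe interface so drm/i915 can drive the GTT directly instead of going through the fake-AGP layer. intel_gmch_probe() gains a second pci_dev argument (the GPU) plus an optional AGP bridge, setup becomes refcounted, and teardown is the now argument-less intel_gmch_remove(). A minimal sketch of the two calling conventions after this merge — the example_* function names are illustrative, not from the patch:

#include <drm/intel-gtt.h>

/* Fake-AGP path: match on the host bridge's PCI id, no GPU pci_dev. */
static int example_agp_probe(struct pci_dev *bridge_pdev,
			     struct agp_bridge_data *bridge)
{
	if (!intel_gmch_probe(bridge_pdev, NULL, bridge))
		return -ENODEV;
	return 0;
}

/* Direct drm/i915 path: match on the GPU's PCI id, no AGP bridge.
 * intel_enable_gtt() is newly exported for this caller. */
static int example_i915_probe(struct pci_dev *bridge_pdev,
			      struct pci_dev *gpu_pdev)
{
	if (!intel_gmch_probe(bridge_pdev, gpu_pdev, NULL))
		return -ENODEV;
	if (!intel_enable_gtt())
		return -EIO;
	return 0;
}

/* Either caller balances its probe with the argument-less remove;
 * the GTT is only torn down when the refcount drops to zero. */
static void example_unload(void)
{
	intel_gmch_remove();
}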
			
			
This commit is contained in commit bd22dc17e4.
@@ -12,6 +12,7 @@
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
+#include <drm/intel-gtt.h>
 
 int intel_agp_enabled;
 EXPORT_SYMBOL(intel_agp_enabled);
@@ -747,7 +748,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 
 	bridge->capndx = cap_ptr;
 
-	if (intel_gmch_probe(pdev, bridge))
+	if (intel_gmch_probe(pdev, NULL, bridge))
 		goto found_gmch;
 
 	for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
@@ -824,7 +825,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
 
 	agp_remove_bridge(bridge);
 
-	intel_gmch_remove(pdev);
+	intel_gmch_remove();
 
 	agp_put_bridge(bridge);
 }
@@ -902,17 +903,6 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
-	ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
-	ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
-	ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
-	ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB),
-	ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
-	ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
-	ID(PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB),
-	ID(PCI_DEVICE_ID_INTEL_HASWELL_HB),
-	ID(PCI_DEVICE_ID_INTEL_HASWELL_M_HB),
-	ID(PCI_DEVICE_ID_INTEL_HASWELL_S_HB),
-	ID(PCI_DEVICE_ID_INTEL_HASWELL_E_HB),
 	{ }
 };
 

@@ -251,7 +251,4 @@
 #define PCI_DEVICE_ID_INTEL_HASWELL_SDV		0x0c16 /* SDV */
 #define PCI_DEVICE_ID_INTEL_HASWELL_E_HB			0x0c04
 
-int intel_gmch_probe(struct pci_dev *pdev,
-			       struct agp_bridge_data *bridge);
-void intel_gmch_remove(struct pci_dev *pdev);
 #endif

@@ -66,7 +66,6 @@ static struct _intel_private {
 	struct pci_dev *bridge_dev;
 	u8 __iomem *registers;
 	phys_addr_t gtt_bus_addr;
-	phys_addr_t gma_bus_addr;
 	u32 PGETBL_save;
 	u32 __iomem *gtt;		/* I915G */
 	bool clear_fake_agp; /* on first access via agp, fill with scratch */
@@ -76,6 +75,7 @@ static struct _intel_private {
 	struct resource ifp_resource;
 	int resource_valid;
 	struct page *scratch_page;
+	int refcount;
 } intel_private;
 
 #define INTEL_GTT_GEN	intel_private.driver->gen
@@ -648,6 +648,7 @@ static void intel_gtt_cleanup(void)
 
 static int intel_gtt_init(void)
 {
+	u32 gma_addr;
 	u32 gtt_map_size;
 	int ret;
 
@@ -694,6 +695,15 @@ static int intel_gtt_init(void)
 		return ret;
 	}
 
+	if (INTEL_GTT_GEN <= 2)
+		pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
+				      &gma_addr);
+	else
+		pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
+				      &gma_addr);
+
+	intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
+
 	return 0;
 }
 
@@ -767,20 +777,10 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
 	writel(addr | pte_flags, intel_private.gtt + entry);
 }
 
-static bool intel_enable_gtt(void)
+bool intel_enable_gtt(void)
 {
-	u32 gma_addr;
 	u8 __iomem *reg;
 
-	if (INTEL_GTT_GEN <= 2)
-		pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
-				      &gma_addr);
-	else
-		pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
-				      &gma_addr);
-
-	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
-
 	if (INTEL_GTT_GEN >= 6)
 	    return true;
 
@@ -823,6 +823,7 @@ static bool intel_enable_gtt(void)
 
 	return true;
 }
+EXPORT_SYMBOL(intel_enable_gtt);
 
 static int i830_setup(void)
 {
@@ -860,7 +861,7 @@ static int intel_fake_agp_configure(void)
 	    return -EIO;
 
 	intel_private.clear_fake_agp = true;
-	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
+	agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr;
 
 	return 0;
 }
@@ -1182,9 +1183,17 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
 static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
 				   unsigned int flags)
 {
+	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
+	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
 	u32 pte_flags;
 
-	pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
+	if (type_mask == AGP_USER_MEMORY)
+		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
+	else {
+		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+		if (gfdt)
+			pte_flags |= GEN6_PTE_GFDT;
+	}
 
 	/* gen6 has bit11-4 for physical addr bit39-32 */
 	addr |= (addr >> 28) & 0xff0;
@@ -1244,6 +1253,7 @@ static int i9xx_setup(void)
 		switch (INTEL_GTT_GEN) {
 		case 5:
 		case 6:
+		case 7:
 			gtt_offset = MB(2);
 			break;
 		case 4:
@@ -1379,7 +1389,6 @@ static const struct intel_gtt_driver valleyview_gtt_driver = {
 	.write_entry = valleyview_write_entry,
 	.dma_mask_size = 40,
 	.check_flags = gen6_check_flags,
-	.chipset_flush = i9xx_chipset_flush,
 };
 
 /* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
@@ -1523,14 +1532,32 @@ static int find_gmch(u16 device)
 	return 1;
 }
 
-int intel_gmch_probe(struct pci_dev *pdev,
-				      struct agp_bridge_data *bridge)
+int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
+		     struct agp_bridge_data *bridge)
 {
 	int i, mask;
-	intel_private.driver = NULL;
 
+	/*
+	 * Can be called from the fake agp driver but also directly from
+	 * drm/i915.ko. Hence we need to check whether everything is set up
+	 * already.
+	 */
+	if (intel_private.driver) {
+		intel_private.refcount++;
+		return 1;
+	}
+
 	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
-		if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
+		if (gpu_pdev) {
+			if (gpu_pdev->device ==
+			    intel_gtt_chipsets[i].gmch_chip_id) {
+				intel_private.pcidev = pci_dev_get(gpu_pdev);
+				intel_private.driver =
+					intel_gtt_chipsets[i].gtt_driver;
+
+				break;
+			}
+		} else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
 			intel_private.driver =
 				intel_gtt_chipsets[i].gtt_driver;
 			break;
@@ -1540,13 +1567,17 @@ int intel_gmch_probe(struct pci_dev *pdev,
 	if (!intel_private.driver)
 		return 0;
 
-	bridge->driver = &intel_fake_agp_driver;
-	bridge->dev_private_data = &intel_private;
-	bridge->dev = pdev;
+	intel_private.refcount++;
 
-	intel_private.bridge_dev = pci_dev_get(pdev);
+	if (bridge) {
+		bridge->driver = &intel_fake_agp_driver;
+		bridge->dev_private_data = &intel_private;
+		bridge->dev = bridge_pdev;
+	}
 
-	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
+	intel_private.bridge_dev = pci_dev_get(bridge_pdev);
+
+	dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
 
 	mask = intel_private.driver->dma_mask_size;
 	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
@@ -1556,11 +1587,11 @@ int intel_gmch_probe(struct pci_dev *pdev,
 		pci_set_consistent_dma_mask(intel_private.pcidev,
 					    DMA_BIT_MASK(mask));
 
-	/*if (bridge->driver == &intel_810_driver)
-		return 1;*/
+	if (intel_gtt_init() != 0) {
+		intel_gmch_remove();
 
-	if (intel_gtt_init() != 0)
 		return 0;
+	}
 
 	return 1;
 }
@@ -1579,12 +1610,16 @@ void intel_gtt_chipset_flush(void)
 }
 EXPORT_SYMBOL(intel_gtt_chipset_flush);
 
-void intel_gmch_remove(struct pci_dev *pdev)
+void intel_gmch_remove(void)
 {
+	if (--intel_private.refcount)
+		return;
+
 	if (intel_private.pcidev)
 		pci_dev_put(intel_private.pcidev);
 	if (intel_private.bridge_dev)
		pci_dev_put(intel_private.bridge_dev);
+	intel_private.driver = NULL;
 }
 EXPORT_SYMBOL(intel_gmch_remove);
 
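An aside on the gen6/valleyview write_entry helpers above: as the in-line comment says, gen6-style PTEs keep physical address bits 39:32 in PTE bits 11:4, which is what the `addr |= (addr >> 28) & 0xff0;` folding implements. A standalone illustration of that packing — not from the patch, names are placeholders:

#include <stdint.h>
#include <stdio.h>

/* Illustrative gen6-style PTE packing: bits 31:12 of the page address
 * stay in place, bits 39:32 are folded into PTE bits 11:4. The valid
 * bit here stands in for I810_PTE_VALID. */
#define EX_PTE_VALID 0x1u

static uint32_t ex_gen6_pte(uint64_t addr)
{
	addr |= (addr >> 28) & 0xff0;	/* same folding as in the patch */
	return (uint32_t)addr | EX_PTE_VALID;
}

int main(void)
{
	uint64_t page = 0x1234567000ULL;	/* a 40-bit page address */

	/* prints pte = 0x34567121: 0x12 (addr bits 39:32) in PTE bits 11:4 */
	printf("pte = 0x%08x\n", ex_gen6_pte(page));
	return 0;
}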
@@ -460,8 +460,8 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
 }
 
 static bool ast_crtc_mode_fixup(struct drm_crtc *crtc,
-				  struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode)
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
 {
 	return true;
 }
@@ -680,7 +680,7 @@ static void ast_encoder_dpms(struct drm_encoder *encoder, int mode)
 }
 
 static bool ast_mode_fixup(struct drm_encoder *encoder,
-			   struct drm_display_mode *mode,
+			   const struct drm_display_mode *mode,
 			   struct drm_display_mode *adjusted_mode)
 {
 	return true;

@@ -97,7 +97,7 @@ static void cirrus_crtc_dpms(struct drm_crtc *crtc, int mode)
  * to just pass that straight through, so this does nothing
  */
 static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc,
-				   struct drm_display_mode *mode,
+				   const struct drm_display_mode *mode,
 				   struct drm_display_mode *adjusted_mode)
 {
 	return true;
@@ -429,8 +429,8 @@ void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 
 
 static bool cirrus_encoder_mode_fixup(struct drm_encoder *encoder,
-				  struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode)
+				      const struct drm_display_mode *mode,
+				      struct drm_display_mode *adjusted_mode)
 {
 	return true;
 }
@@ -641,8 +641,6 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
 
 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 		return -EINVAL;
-	if (dev->queue_count)
-		return -EBUSY;	/* Not while in use */
 
 	/* Make sure buffers are located in AGP memory that we own */
 	valid = 0;
@@ -704,7 +702,6 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
 		buf->next = NULL;
 		buf->waiting = 0;
 		buf->pending = 0;
-		init_waitqueue_head(&buf->dma_wait);
 		buf->file_priv = NULL;
 
 		buf->dev_priv_size = dev->driver->dev_priv_size;
@@ -796,13 +793,11 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 	order = drm_order(request->size);
 	size = 1 << order;
 
-	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
-		  request->count, request->size, size, order, dev->queue_count);
+	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
+		  request->count, request->size, size, order);
 
 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 		return -EINVAL;
-	if (dev->queue_count)
-		return -EBUSY;	/* Not while in use */
 
 	alignment = (request->flags & _DRM_PAGE_ALIGN)
 	    ? PAGE_ALIGN(size) : size;
@@ -904,7 +899,6 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 			buf->next = NULL;
 			buf->waiting = 0;
 			buf->pending = 0;
-			init_waitqueue_head(&buf->dma_wait);
 			buf->file_priv = NULL;
 
 			buf->dev_priv_size = dev->driver->dev_priv_size;
@@ -1019,8 +1013,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
 
 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 		return -EINVAL;
-	if (dev->queue_count)
-		return -EBUSY;	/* Not while in use */
 
 	spin_lock(&dev->count_lock);
 	if (dev->buf_use) {
@@ -1071,7 +1063,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
 		buf->next = NULL;
 		buf->waiting = 0;
 		buf->pending = 0;
-		init_waitqueue_head(&buf->dma_wait);
 		buf->file_priv = NULL;
 
 		buf->dev_priv_size = dev->driver->dev_priv_size;
@@ -1177,8 +1168,6 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
 
 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 		return -EINVAL;
-	if (dev->queue_count)
-		return -EBUSY;	/* Not while in use */
 
 	spin_lock(&dev->count_lock);
 	if (dev->buf_use) {
@@ -1228,7 +1217,6 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
 		buf->next = NULL;
 		buf->waiting = 0;
 		buf->pending = 0;
-		init_waitqueue_head(&buf->dma_wait);
 		buf->file_priv = NULL;
 
 		buf->dev_priv_size = dev->driver->dev_priv_size;

@@ -46,7 +46,6 @@ static struct drm_info_list drm_debugfs_list[] = {
 	{"name", drm_name_info, 0},
 	{"vm", drm_vm_info, 0},
 	{"clients", drm_clients_info, 0},
-	{"queues", drm_queues_info, 0},
 	{"bufs", drm_bufs_info, 0},
 	{"gem_names", drm_gem_name_info, DRIVER_GEM},
 #if DRM_DEBUG_CODE

@@ -120,11 +120,6 @@ void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
 	buf->pending = 0;
 	buf->file_priv = NULL;
 	buf->used = 0;
-
-	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
-	    && waitqueue_active(&buf->dma_wait)) {
-		wake_up_interruptible(&buf->dma_wait);
-	}
 }
 
 /**
@@ -182,7 +182,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 int drm_lastclose(struct drm_device * dev)
 {
 	struct drm_vma_entry *vma, *vma_temp;
-	int i;
 
 	DRM_DEBUG("\n");
 
@@ -228,16 +227,6 @@ int drm_lastclose(struct drm_device * dev)
 		kfree(vma);
 	}
 
-	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
-		for (i = 0; i < dev->queue_count; i++) {
-			kfree(dev->queuelist[i]);
-			dev->queuelist[i] = NULL;
-		}
-		kfree(dev->queuelist);
-		dev->queuelist = NULL;
-	}
-	dev->queue_count = 0;
-
 	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
 	    !drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_dma_takedown(dev);
@@ -486,7 +475,7 @@ long drm_ioctl(struct file *filp,
 		kfree(kdata);
 	atomic_dec(&dev->ioctl_count);
 	if (retcode)
-		DRM_DEBUG("ret = %x\n", retcode);
+		DRM_DEBUG("ret = %d\n", retcode);
 	return retcode;
 }
 

@@ -228,7 +228,7 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
 	int i, ret;
 	for (i = 0; i < fb_helper->crtc_count; i++) {
 		struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
-		ret = drm_crtc_helper_set_config(mode_set);
+		ret = mode_set->crtc->funcs->set_config(mode_set);
 		if (ret)
 			error = true;
 	}
@@ -1353,7 +1353,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 	struct drm_device *dev = fb_helper->dev;
 	int count = 0;
 	u32 max_width, max_height, bpp_sel;
-	bool bound = false, crtcs_bound = false;
+	int bound = 0, crtcs_bound = 0;
 	struct drm_crtc *crtc;
 
 	if (!fb_helper->fb)
@@ -1362,12 +1362,12 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 	mutex_lock(&dev->mode_config.mutex);
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		if (crtc->fb)
-			crtcs_bound = true;
+			crtcs_bound++;
 		if (crtc->fb == fb_helper->fb)
-			bound = true;
+			bound++;
 	}
 
-	if (!bound && crtcs_bound) {
+	if (bound < crtcs_bound) {
 		fb_helper->delayed_hotplug = true;
 		mutex_unlock(&dev->mode_config.mutex);
 		return 0;
@@ -75,10 +75,6 @@ static int drm_setup(struct drm_device * dev)
 
 	dev->sigdata.lock = NULL;
 
-	dev->queue_count = 0;
-	dev->queue_reserved = 0;
-	dev->queue_slots = 0;
-	dev->queuelist = NULL;
 	dev->context_flag = 0;
 	dev->interrupt_flag = 0;
 	dev->dma_flag = 0;
@@ -144,12 +140,12 @@ int drm_open(struct inode *inode, struct file *filp)
 	}
 	if (!retcode) {
 		mutex_lock(&dev->struct_mutex);
-		if (minor->type == DRM_MINOR_LEGACY) {
-			if (dev->dev_mapping == NULL)
-				dev->dev_mapping = inode->i_mapping;
-			else if (dev->dev_mapping != inode->i_mapping)
-				retcode = -ENODEV;
-		}
+		if (dev->dev_mapping == NULL)
+			dev->dev_mapping = &inode->i_data;
+		/* ihold ensures nobody can remove inode with our i_data */
+		ihold(container_of(dev->dev_mapping, struct inode, i_data));
+		inode->i_mapping = dev->dev_mapping;
+		filp->f_mapping = dev->dev_mapping;
 		mutex_unlock(&dev->struct_mutex);
 	}
 
@@ -370,72 +366,16 @@ int drm_fasync(int fd, struct file *filp, int on)
 }
 EXPORT_SYMBOL(drm_fasync);
 
-/*
- * Reclaim locked buffers; note that this may be a bad idea if the current
- * context doesn't have the hw lock...
- */
-static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f)
-{
-	struct drm_file *file_priv = f->private_data;
-
-	if (drm_i_have_hw_lock(dev, file_priv)) {
-		dev->driver->reclaim_buffers_locked(dev, file_priv);
-	} else {
-		unsigned long _end = jiffies + 3 * DRM_HZ;
-		int locked = 0;
-
-		drm_idlelock_take(&file_priv->master->lock);
-
-		/*
-		 * Wait for a while.
-		 */
-		do {
-			spin_lock_bh(&file_priv->master->lock.spinlock);
-			locked = file_priv->master->lock.idle_has_lock;
-			spin_unlock_bh(&file_priv->master->lock.spinlock);
-			if (locked)
-				break;
-			schedule();
-		} while (!time_after_eq(jiffies, _end));
-
-		if (!locked) {
-			DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
-				  "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
-				  "\tI will go on reclaiming the buffers anyway.\n");
-		}
-
-		dev->driver->reclaim_buffers_locked(dev, file_priv);
-		drm_idlelock_release(&file_priv->master->lock);
-	}
-}
-
 static void drm_master_release(struct drm_device *dev, struct file *filp)
 {
 	struct drm_file *file_priv = filp->private_data;
 
-	if (dev->driver->reclaim_buffers_locked &&
-	    file_priv->master->lock.hw_lock)
-		drm_reclaim_locked_buffers(dev, filp);
-
-	if (dev->driver->reclaim_buffers_idlelocked &&
-	    file_priv->master->lock.hw_lock) {
-		drm_idlelock_take(&file_priv->master->lock);
-		dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
-		drm_idlelock_release(&file_priv->master->lock);
-	}
-
-
 	if (drm_i_have_hw_lock(dev, file_priv)) {
 		DRM_DEBUG("File %p released, freeing lock for context %d\n",
 			  filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
 		drm_lock_free(&file_priv->master->lock,
 			      _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
 	}
-
-	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
-	    !dev->driver->reclaim_buffers_locked) {
-		dev->driver->reclaim_buffers(dev, file_priv);
-	}
 }
 
 static void drm_events_release(struct drm_file *file_priv)
@@ -505,6 +445,9 @@ int drm_release(struct inode *inode, struct file *filp)
 	if (file_priv->minor->master)
 		drm_master_release(dev, filp);
 
+	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		drm_core_reclaim_buffers(dev, file_priv);
+
 	drm_events_release(file_priv);
 
 	if (dev->driver->driver_features & DRIVER_MODESET)
@@ -566,6 +509,9 @@ int drm_release(struct inode *inode, struct file *filp)
 		}
 	}
 
+	BUG_ON(dev->dev_mapping == NULL);
+	iput(container_of(dev->dev_mapping, struct inode, i_data));
+
 	/* drop the reference held my the file priv */
 	drm_master_put(&file_priv->master);
 	file_priv->is_master = 0;

@@ -354,7 +354,7 @@ drm_gem_create_mmap_offset(struct drm_gem_object *obj)
 
 	/* Get a DRM GEM mmap offset allocated... */
 	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
-			obj->size / PAGE_SIZE, 0, 0);
+			obj->size / PAGE_SIZE, 0, false);
 
 	if (!list->file_offset_node) {
 		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
@@ -109,42 +109,6 @@ int drm_vm_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-/**
- * Called when "/proc/dri/.../queues" is read.
- */
-int drm_queues_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	int i;
-	struct drm_queue *q;
-
-	mutex_lock(&dev->struct_mutex);
-	seq_printf(m, "  ctx/flags   use   fin"
-		   "   blk/rw/rwf  wait    flushed	   queued"
-		   "      locks\n\n");
-	for (i = 0; i < dev->queue_count; i++) {
-		q = dev->queuelist[i];
-		atomic_inc(&q->use_count);
-		seq_printf(m,   "%5d/0x%03x %5d %5d"
-			   " %5d/%c%c/%c%c%c %5Zd\n",
-			   i,
-			   q->flags,
-			   atomic_read(&q->use_count),
-			   atomic_read(&q->finalization),
-			   atomic_read(&q->block_count),
-			   atomic_read(&q->block_read) ? 'r' : '-',
-			   atomic_read(&q->block_write) ? 'w' : '-',
-			   waitqueue_active(&q->read_queue) ? 'r' : '-',
-			   waitqueue_active(&q->write_queue) ? 'w' : '-',
-			   waitqueue_active(&q->flush_queue) ? 'f' : '-',
-			   DRM_BUFCOUNT(&q->waitlist));
-		atomic_dec(&q->use_count);
-	}
-	mutex_unlock(&dev->struct_mutex);
-	return 0;
-}
-
 /**
  * Called when "/proc/dri/.../bufs" is read.
  */
@@ -235,7 +199,7 @@ int drm_clients_info(struct seq_file *m, void *data)
 }
 
 
-int drm_gem_one_name_info(int id, void *ptr, void *data)
+static int drm_gem_one_name_info(int id, void *ptr, void *data)
 {
 	struct drm_gem_object *obj = ptr;
 	struct seq_file *m = data;

@@ -974,7 +974,6 @@ EXPORT_SYMBOL(drm_vblank_off);
  * drm_vblank_pre_modeset - account for vblanks across mode sets
  * @dev: DRM device
  * @crtc: CRTC in question
- * @post: post or pre mode set?
 *
 * Account for vblank events across mode setting events, which will likely
 * reset the hardware frame counter.
@@ -1037,6 +1036,10 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
 	if (!dev->num_crtcs)
 		return 0;
 
+	/* KMS drivers handle this internally */
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return 0;
+
 	crtc = modeset->crtc;
 	if (crtc >= dev->num_crtcs)
 		return -EINVAL;

@@ -70,10 +70,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 		  lock->context, task_pid_nr(current),
 		  master->lock.hw_lock->lock, lock->flags);
 
-	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
-		if (lock->context < 0)
-			return -EINVAL;
-
 	add_wait_queue(&master->lock.lock_queue, &entry);
 	spin_lock_bh(&master->lock.spinlock);
 	master->lock.user_waiters++;
@@ -118,45 +118,53 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 				 struct drm_mm_node *node,
-				 unsigned long size, unsigned alignment)
+				 unsigned long size, unsigned alignment,
+				 unsigned long color)
 {
 	struct drm_mm *mm = hole_node->mm;
-	unsigned long tmp = 0, wasted = 0;
 	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
 	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+	unsigned long adj_start = hole_start;
+	unsigned long adj_end = hole_end;
 
 	BUG_ON(!hole_node->hole_follows || node->allocated);
 
-	if (alignment)
-		tmp = hole_start % alignment;
+	if (mm->color_adjust)
+		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
-	if (!tmp) {
+	if (alignment) {
+		unsigned tmp = adj_start % alignment;
+		if (tmp)
+			adj_start += alignment - tmp;
+	}
+
+	if (adj_start == hole_start) {
 		hole_node->hole_follows = 0;
-		list_del_init(&hole_node->hole_stack);
-	} else
-		wasted = alignment - tmp;
+		list_del(&hole_node->hole_stack);
+	}
 
-	node->start = hole_start + wasted;
+	node->start = adj_start;
 	node->size = size;
 	node->mm = mm;
+	node->color = color;
 	node->allocated = 1;
 
 	INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole_node->node_list);
 
-	BUG_ON(node->start + node->size > hole_end);
+	BUG_ON(node->start + node->size > adj_end);
 
+	node->hole_follows = 0;
 	if (node->start + node->size < hole_end) {
 		list_add(&node->hole_stack, &mm->hole_stack);
 		node->hole_follows = 1;
-	} else {
-		node->hole_follows = 0;
 	}
 }
 
 struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
 					     unsigned long size,
 					     unsigned alignment,
+					     unsigned long color,
 					     int atomic)
 {
 	struct drm_mm_node *node;
@@ -165,7 +173,7 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
 	if (unlikely(node == NULL))
 		return NULL;
 
-	drm_mm_insert_helper(hole_node, node, size, alignment);
+	drm_mm_insert_helper(hole_node, node, size, alignment, color);
 
 	return node;
 }
@@ -181,11 +189,11 @@ int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
 {
 	struct drm_mm_node *hole_node;
 
-	hole_node = drm_mm_search_free(mm, size, alignment, 0);
+	hole_node = drm_mm_search_free(mm, size, alignment, false);
 	if (!hole_node)
 		return -ENOSPC;
 
-	drm_mm_insert_helper(hole_node, node, size, alignment);
+	drm_mm_insert_helper(hole_node, node, size, alignment, 0);
 
 	return 0;
 }
@@ -194,50 +202,57 @@ EXPORT_SYMBOL(drm_mm_insert_node);
 static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 				       struct drm_mm_node *node,
 				       unsigned long size, unsigned alignment,
+				       unsigned long color,
 				       unsigned long start, unsigned long end)
 {
 	struct drm_mm *mm = hole_node->mm;
-	unsigned long tmp = 0, wasted = 0;
 	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
 	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+	unsigned long adj_start = hole_start;
+	unsigned long adj_end = hole_end;
 
 	BUG_ON(!hole_node->hole_follows || node->allocated);
 
-	if (hole_start < start)
-		wasted += start - hole_start;
-	if (alignment)
-		tmp = (hole_start + wasted) % alignment;
+	if (mm->color_adjust)
+		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
-	if (tmp)
-		wasted += alignment - tmp;
+	if (adj_start < start)
+		adj_start = start;
 
-	if (!wasted) {
-		hole_node->hole_follows = 0;
-		list_del_init(&hole_node->hole_stack);
+	if (alignment) {
+		unsigned tmp = adj_start % alignment;
+		if (tmp)
+			adj_start += alignment - tmp;
 	}
 
-	node->start = hole_start + wasted;
+	if (adj_start == hole_start) {
+		hole_node->hole_follows = 0;
+		list_del(&hole_node->hole_stack);
+	}
+
+	node->start = adj_start;
 	node->size = size;
 	node->mm = mm;
+	node->color = color;
 	node->allocated = 1;
 
 	INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole_node->node_list);
 
-	BUG_ON(node->start + node->size > hole_end);
+	BUG_ON(node->start + node->size > adj_end);
 	BUG_ON(node->start + node->size > end);
 
+	node->hole_follows = 0;
 	if (node->start + node->size < hole_end) {
 		list_add(&node->hole_stack, &mm->hole_stack);
 		node->hole_follows = 1;
-	} else {
-		node->hole_follows = 0;
 	}
 }
 
 struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
 						unsigned long size,
 						unsigned alignment,
+						unsigned long color,
 						unsigned long start,
 						unsigned long end,
 						int atomic)
@@ -248,7 +263,7 @@ struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node
 	if (unlikely(node == NULL))
 		return NULL;
 
-	drm_mm_insert_helper_range(hole_node, node, size, alignment,
+	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
 				   start, end);
 
 	return node;
@@ -267,11 +282,11 @@ int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
 	struct drm_mm_node *hole_node;
 
 	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
-						start, end, 0);
+						start, end, false);
 	if (!hole_node)
 		return -ENOSPC;
 
-	drm_mm_insert_helper_range(hole_node, node, size, alignment,
+	drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
 				   start, end);
 
 	return 0;
@@ -336,27 +351,23 @@ EXPORT_SYMBOL(drm_mm_put_block);
 static int check_free_hole(unsigned long start, unsigned long end,
 			   unsigned long size, unsigned alignment)
 {
-	unsigned wasted = 0;
-
 	if (end - start < size)
 		return 0;
 
 	if (alignment) {
 		unsigned tmp = start % alignment;
 		if (tmp)
-			wasted = alignment - tmp;
+			start += alignment - tmp;
 	}
 
-	if (end >= start + size + wasted) {
-		return 1;
-	}
-
-	return 0;
+	return end >= start + size;
 }
 
-struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
-				       unsigned long size,
-				       unsigned alignment, int best_match)
+struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+					       unsigned long size,
+					       unsigned alignment,
+					       unsigned long color,
+					       bool best_match)
 {
 	struct drm_mm_node *entry;
 	struct drm_mm_node *best;
@@ -368,10 +379,17 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 	best_size = ~0UL;
 
 	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+		unsigned long adj_start = drm_mm_hole_node_start(entry);
+		unsigned long adj_end = drm_mm_hole_node_end(entry);
+
+		if (mm->color_adjust) {
+			mm->color_adjust(entry, color, &adj_start, &adj_end);
+			if (adj_end <= adj_start)
+				continue;
+		}
+
 		BUG_ON(!entry->hole_follows);
-		if (!check_free_hole(drm_mm_hole_node_start(entry),
-				     drm_mm_hole_node_end(entry),
-				     size, alignment))
+		if (!check_free_hole(adj_start, adj_end, size, alignment))
 			continue;
 
 		if (!best_match)
@@ -385,14 +403,15 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 
 	return best;
 }
-EXPORT_SYMBOL(drm_mm_search_free);
+EXPORT_SYMBOL(drm_mm_search_free_generic);
 
-struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
-						unsigned long size,
-						unsigned alignment,
-						unsigned long start,
-						unsigned long end,
-						int best_match)
+struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+							unsigned long size,
+							unsigned alignment,
+							unsigned long color,
+							unsigned long start,
+							unsigned long end,
+							bool best_match)
 {
 	struct drm_mm_node *entry;
 	struct drm_mm_node *best;
@@ -410,6 +429,13 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 			end : drm_mm_hole_node_end(entry);
 
 		BUG_ON(!entry->hole_follows);
+
+		if (mm->color_adjust) {
+			mm->color_adjust(entry, color, &adj_start, &adj_end);
+			if (adj_end <= adj_start)
+				continue;
+		}
+
 		if (!check_free_hole(adj_start, adj_end, size, alignment))
 			continue;
 
@@ -424,7 +450,7 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 
 	return best;
 }
-EXPORT_SYMBOL(drm_mm_search_free_in_range);
+EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
 
 /**
  * Moves an allocation. To be used with embedded struct drm_mm_node.
@@ -437,6 +463,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 	new->mm = old->mm;
 	new->start = old->start;
 	new->size = old->size;
+	new->color = old->color;
 
 	old->allocated = 0;
 	new->allocated = 1;
@@ -452,9 +479,12 @@ EXPORT_SYMBOL(drm_mm_replace_node);
  * Warning: As long as the scan list is non-empty, no other operations than
  * adding/removing nodes to/from the scan list are allowed.
  */
-void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
-		      unsigned alignment)
+void drm_mm_init_scan(struct drm_mm *mm,
+		      unsigned long size,
+		      unsigned alignment,
+		      unsigned long color)
 {
+	mm->scan_color = color;
 	mm->scan_alignment = alignment;
 	mm->scan_size = size;
 	mm->scanned_blocks = 0;
@@ -474,11 +504,14 @@ EXPORT_SYMBOL(drm_mm_init_scan);
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
-void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+void drm_mm_init_scan_with_range(struct drm_mm *mm,
+				 unsigned long size,
 				 unsigned alignment,
+				 unsigned long color,
 				 unsigned long start,
 				 unsigned long end)
 {
+	mm->scan_color = color;
 	mm->scan_alignment = alignment;
 	mm->scan_size = size;
 	mm->scanned_blocks = 0;
@@ -522,17 +555,21 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
 
 	hole_start = drm_mm_hole_node_start(prev_node);
 	hole_end = drm_mm_hole_node_end(prev_node);
+
+	adj_start = hole_start;
+	adj_end = hole_end;
+
+	if (mm->color_adjust)
+		mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
 
 	if (mm->scan_check_range) {
-		adj_start = hole_start < mm->scan_start ?
-			mm->scan_start : hole_start;
-		adj_end = hole_end > mm->scan_end ?
-			mm->scan_end : hole_end;
-	} else {
-		adj_start = hole_start;
-		adj_end = hole_end;
+		if (adj_start < mm->scan_start)
+			adj_start = mm->scan_start;
+		if (adj_end > mm->scan_end)
+			adj_end = mm->scan_end;
 	}
 
-	if (check_free_hole(adj_start , adj_end,
+	if (check_free_hole(adj_start, adj_end,
 			    mm->scan_size, mm->scan_alignment)) {
 		mm->scan_hit_start = hole_start;
 		mm->scan_hit_size = hole_end;
@@ -616,6 +653,8 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 	mm->head_node.size = start - mm->head_node.start;
 	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
 
+	mm->color_adjust = NULL;
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_init);
 
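The drm_mm hunks above thread an opaque color through allocation, search, and eviction scanning; a driver opts in by setting mm->color_adjust, which may shrink a candidate hole when its neighbours carry an incompatible color. A hypothetical callback, shown only to illustrate the contract (*start/*end may only move inwards) — not code from this series:

/* Hypothetical color_adjust hook: keep one guard page between nodes
 * whose color differs from the color being allocated. node is the
 * node preceding the hole; its list successor follows the hole. */
static void example_color_adjust(struct drm_mm_node *node,
				 unsigned long color,
				 unsigned long *start, unsigned long *end)
{
	struct drm_mm_node *next =
		list_entry(node->node_list.next, struct drm_mm_node,
			   node_list);

	if (node->allocated && node->color != color)
		*start += PAGE_SIZE;
	if (next->allocated && next->color != color)
		*end -= PAGE_SIZE;
}

/* Installed once after drm_mm_init():
 *	mm->color_adjust = example_color_adjust;
 * after which color-aware lookups consult it, e.g.:
 *	hole = drm_mm_search_free_generic(mm, size, align, color, false);
 */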
@@ -465,3 +465,52 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
 	DRM_INFO("Module unloaded\n");
 }
 EXPORT_SYMBOL(drm_pci_exit);
+
+int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
+{
+	struct pci_dev *root;
+	int pos;
+	u32 lnkcap, lnkcap2;
+
+	*mask = 0;
+	if (!dev->pdev)
+		return -EINVAL;
+
+	if (!pci_is_pcie(dev->pdev))
+		return -EINVAL;
+
+	root = dev->pdev->bus->self;
+
+	pos = pci_pcie_cap(root);
+	if (!pos)
+		return -EINVAL;
+
+	/* we've been informed via and serverworks don't make the cut */
+	if (root->vendor == PCI_VENDOR_ID_VIA ||
+	    root->vendor == PCI_VENDOR_ID_SERVERWORKS)
+		return -EINVAL;
+
+	pci_read_config_dword(root, pos + PCI_EXP_LNKCAP, &lnkcap);
+	pci_read_config_dword(root, pos + PCI_EXP_LNKCAP2, &lnkcap2);
+
+	lnkcap &= PCI_EXP_LNKCAP_SLS;
+	lnkcap2 &= 0xfe;
+
+	if (lnkcap2) { /* PCIE GEN 3.0 */
+		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
+			*mask |= DRM_PCIE_SPEED_25;
+		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
+			*mask |= DRM_PCIE_SPEED_50;
+		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
+			*mask |= DRM_PCIE_SPEED_80;
+	} else {
+		if (lnkcap & 1)
+			*mask |= DRM_PCIE_SPEED_25;
+		if (lnkcap & 2)
+			*mask |= DRM_PCIE_SPEED_50;
+	}
+
+	DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
+	return 0;
+}
+EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
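The new drm_pcie_get_speed_cap_mask() above is what lets radeon decide whether the root port can train the link at gen2/gen3 rates. A hedged usage sketch — the caller name is illustrative, not from the series:

/* Illustrative caller: ask the DRM core which link rates the PCIe
 * root port advertises before switching the GPU link to a faster one. */
static bool example_link_supports_gen2(struct drm_device *dev)
{
	u32 mask;

	if (drm_pcie_get_speed_cap_mask(dev, &mask))
		return false;	/* not PCIe, or a blacklisted bridge */

	return (mask & DRM_PCIE_SPEED_50) != 0;	/* 5.0 GT/s capable */
}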
@@ -53,7 +53,6 @@ static struct drm_info_list drm_proc_list[] = {
 	{"name", drm_name_info, 0},
 	{"vm", drm_vm_info, 0},
 	{"clients", drm_clients_info, 0},
-	{"queues", drm_queues_info, 0},
 	{"bufs", drm_bufs_info, 0},
 	{"gem_names", drm_gem_name_info, DRIVER_GEM},
 #if DRM_DEBUG_CODE

@@ -134,6 +134,7 @@ void drm_sysfs_destroy(void)
 		return;
 	class_remove_file(drm_class, &class_attr_version.attr);
 	class_destroy(drm_class);
+	drm_class = NULL;
 }
 
 /**
@@ -554,6 +555,9 @@ void drm_sysfs_device_remove(struct drm_minor *minor)
 
 int drm_class_device_register(struct device *dev)
 {
+	if (!drm_class || IS_ERR(drm_class))
+		return -ENOENT;
+
 	dev->class = drm_class;
 	return device_register(dev);
 }
@@ -237,7 +237,7 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
 
 static bool
 exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
-			    struct drm_display_mode *mode,
+			    const struct drm_display_mode *mode,
 			    struct drm_display_mode *adjusted_mode)
 {
 	DRM_DEBUG_KMS("%s\n", __FILE__);

@@ -174,7 +174,7 @@ struct exynos_drm_manager_ops {
 	void (*apply)(struct device *subdrv_dev);
 	void (*mode_fixup)(struct device *subdrv_dev,
 				struct drm_connector *connector,
-				struct drm_display_mode *mode,
+				const struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode);
 	void (*mode_set)(struct device *subdrv_dev, void *mode);
 	void (*get_max_resol)(struct device *subdrv_dev, unsigned int *width,

@@ -108,7 +108,7 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
 
 static bool
 exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
-			       struct drm_display_mode *mode,
+			       const struct drm_display_mode *mode,
 			       struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;

@@ -142,7 +142,7 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
 
 static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
 				struct drm_connector *connector,
-				struct drm_display_mode *mode,
+				const struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode)
 {
 	struct drm_hdmi_context *ctx = to_context(subdrv_dev);

@@ -51,7 +51,7 @@ struct exynos_hdmi_ops {
 
 	/* manager */
 	void (*mode_fixup)(void *ctx, struct drm_connector *connector,
-				struct drm_display_mode *mode,
+				const struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode);
 	void (*mode_set)(void *ctx, void *mode);
 	void (*get_max_resol)(void *ctx, unsigned int *width,

@@ -1940,7 +1940,7 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
 
 static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
-				struct drm_display_mode *mode,
+				const struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode)
 {
 	struct drm_display_mode *m;

@@ -82,7 +82,7 @@ static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
 }
 
 static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder,
-				 struct drm_display_mode *mode,
+				 const struct drm_display_mode *mode,
 				 struct drm_display_mode *adjusted_mode)
 {
 	return true;

@@ -913,7 +913,7 @@ static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
 }
 
 static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
-				  struct drm_display_mode *mode,
+				  const struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
 {
 	return true;

@@ -90,7 +90,7 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
 }
 
 static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
-				  struct drm_display_mode *mode,
+				  const struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
 {
 	return true;

@@ -270,7 +270,7 @@ static int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
 }
 
 static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
-				  struct drm_display_mode *mode,
+				  const struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;

@@ -427,7 +427,7 @@ parse_device_mapping(struct drm_psb_private *dev_priv,
 *
 * Returns 0 on success, nonzero on failure.
 */
-bool psb_intel_init_bios(struct drm_device *dev)
+int psb_intel_init_bios(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct pci_dev *pdev = dev->pdev;

@@ -431,7 +431,7 @@ struct bdb_driver_features {
 	u8 custom_vbt_version;
 } __attribute__((packed));
 
-extern bool psb_intel_init_bios(struct drm_device *dev);
+extern int psb_intel_init_bios(struct drm_device *dev);
 extern void psb_intel_destroy_bios(struct drm_device *dev);
 
 /*
@@ -684,7 +684,7 @@ void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
 }
 
 bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
-				     struct drm_display_mode *mode,
+				     const struct drm_display_mode *mode,
 				     struct drm_display_mode *adjusted_mode)
 {
 	struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);

@@ -65,7 +65,7 @@ extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
 /* MDFLD DPI helper functions */
 extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode);
 extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
-				struct drm_display_mode *mode,
+				const struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode);
 extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder);
 extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder);

@@ -117,7 +117,7 @@ static void psb_intel_crtc_commit(struct drm_crtc *crtc)
 }
 
 static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
-				  struct drm_display_mode *mode,
+				  const struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
 {
 	return true;

@@ -487,7 +487,7 @@ oaktrail_crtc_mode_set_exit:
 }
 
 static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
-				  struct drm_display_mode *mode,
+				  const struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
 {
 	return true;

@@ -191,7 +191,7 @@ static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
 }
 
 static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder,
-				 struct drm_display_mode *mode,
+				 const struct drm_display_mode *mode,
 				 struct drm_display_mode *adjusted_mode)
 {
 	return true;

@@ -633,7 +633,6 @@ static struct drm_driver driver = {
 	.open = psb_driver_open,
 	.preclose = psb_driver_preclose,
 	.postclose = psb_driver_close,
-	.reclaim_buffers = drm_core_reclaim_buffers,
 
 	.gem_init_object = psb_gem_init_object,
 	.gem_free_object = psb_gem_free_object,

@@ -543,7 +543,7 @@ void psb_intel_encoder_destroy(struct drm_encoder *encoder)
 }
 
 static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
-				  struct drm_display_mode *mode,
+				  const struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
 {
 	return true;

@@ -268,7 +268,7 @@ extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
 							*mode_cmd,
 							void *mm_private);
 extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
-				      struct drm_display_mode *mode,
+				      const struct drm_display_mode *mode,
 				      struct drm_display_mode *adjusted_mode);
 extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
 				     struct drm_display_mode *mode);

@@ -375,7 +375,7 @@ int psb_intel_lvds_mode_valid(struct drm_connector *connector,
 }
 
 bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
-				  struct drm_display_mode *mode,
+				  const struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;

@@ -901,7 +901,7 @@ static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
 
 static bool
 psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdvo,
-					struct drm_display_mode *mode)
+					const struct drm_display_mode *mode)
 {
 	struct psb_intel_sdvo_dtd output_dtd;
 
@@ -918,7 +918,7 @@ psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdv
 
 static bool
 psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
-					struct drm_display_mode *mode,
+					const struct drm_display_mode *mode,
 					struct drm_display_mode *adjusted_mode)
 {
 	/* Reset the input timing to the screen. Assume always input 0. */
@@ -942,7 +942,7 @@ psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
 }
 
 static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
-				  struct drm_display_mode *mode,
+				  const struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
 {
 	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
@@ -88,7 +88,7 @@ static void ch7006_encoder_restore(struct drm_encoder *encoder)
 }
 
 static bool ch7006_encoder_mode_fixup(struct drm_encoder *encoder,
-				      struct drm_display_mode *mode,
+				      const struct drm_display_mode *mode,
 				      struct drm_display_mode *adjusted_mode)
 {
 	struct ch7006_priv *priv = to_ch7006_priv(encoder);

@@ -172,7 +172,7 @@ struct ch7006_mode ch7006_modes[] = {
 };
 
 struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
-				       struct drm_display_mode *drm_mode)
+				       const struct drm_display_mode *drm_mode)
 {
 	struct ch7006_priv *priv = to_ch7006_priv(encoder);
 	struct ch7006_mode *mode;

@@ -111,7 +111,7 @@ extern struct ch7006_tv_norm_info ch7006_tv_norms[];
 extern struct ch7006_mode ch7006_modes[];
 
 struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
-				       struct drm_display_mode *drm_mode);
+				       const struct drm_display_mode *drm_mode);
 
 void ch7006_setup_levels(struct drm_encoder *encoder);
 void ch7006_setup_subcarrier(struct drm_encoder *encoder);

@@ -254,7 +254,7 @@ sil164_encoder_restore(struct drm_encoder *encoder)
 
 static bool
 sil164_encoder_mode_fixup(struct drm_encoder *encoder,
-			  struct drm_display_mode *mode,
+			  const struct drm_display_mode *mode,
 			  struct drm_display_mode *adjusted_mode)
 {
 	return true;
@@ -881,7 +881,7 @@ static int i810_flush_queue(struct drm_device *dev)
 }
 
 /* Must be called with the lock held */
-static void i810_reclaim_buffers(struct drm_device *dev,
+void i810_driver_reclaim_buffers(struct drm_device *dev,
 				 struct drm_file *file_priv)
 {
 	struct drm_device_dma *dma = dev->dma;
@@ -1220,12 +1220,17 @@ void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
 		if (dev_priv->page_flipping)
 			i810_do_cleanup_pageflip(dev);
 	}
-}
-
-void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
-					struct drm_file *file_priv)
-{
-	i810_reclaim_buffers(dev, file_priv);
+	if (file_priv->master && file_priv->master->lock.hw_lock) {
+		drm_idlelock_take(&file_priv->master->lock);
+		i810_driver_reclaim_buffers(dev, file_priv);
+		drm_idlelock_release(&file_priv->master->lock);
+	} else {
+		/* master disappeared, clean up stuff anyway and hope nothing
+		 * goes wrong */
+		i810_driver_reclaim_buffers(dev, file_priv);
+	}
 
 }
 
 int i810_driver_dma_quiescent(struct drm_device *dev)

@@ -57,13 +57,12 @@ static const struct file_operations i810_driver_fops = {
 static struct drm_driver driver = {
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
-	    DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
+	    DRIVER_HAVE_DMA,
 	.dev_priv_size = sizeof(drm_i810_buf_priv_t),
 	.load = i810_driver_load,
 	.lastclose = i810_driver_lastclose,
 	.preclose = i810_driver_preclose,
 	.device_is_agp = i810_driver_device_is_agp,
-	.reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
 	.dma_quiescent = i810_driver_dma_quiescent,
 	.ioctls = i810_ioctls,
 	.fops = &i810_driver_fops,

@@ -116,14 +116,12 @@ typedef struct drm_i810_private {
 
 				/* i810_dma.c */
 extern int i810_driver_dma_quiescent(struct drm_device *dev);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
-					       struct drm_file *file_priv);
+void i810_driver_reclaim_buffers(struct drm_device *dev,
+			         struct drm_file *file_priv);
 extern int i810_driver_load(struct drm_device *, unsigned long flags);
 extern void i810_driver_lastclose(struct drm_device *dev);
 extern void i810_driver_preclose(struct drm_device *dev,
 				 struct drm_file *file_priv);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
-					       struct drm_file *file_priv);
 extern int i810_driver_device_is_agp(struct drm_device *dev);
 
 extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
| @ -7,6 +7,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \ | ||||
| 	  i915_debugfs.o \
 | ||||
|           i915_suspend.o \
 | ||||
| 	  i915_gem.o \
 | ||||
| 	  i915_gem_context.o \
 | ||||
| 	  i915_gem_debug.o \
 | ||||
| 	  i915_gem_evict.o \
 | ||||
| 	  i915_gem_execbuffer.o \
 | ||||
|  | ||||
| @ -86,7 +86,7 @@ struct intel_dvo_dev_ops { | ||||
| 	 * buses with clock limitations. | ||||
| 	 */ | ||||
| 	bool (*mode_fixup)(struct intel_dvo_device *dvo, | ||||
| 			   struct drm_display_mode *mode, | ||||
| 			   const struct drm_display_mode *mode, | ||||
| 			   struct drm_display_mode *adjusted_mode); | ||||
| 
 | ||||
| 	/*
 | ||||
|  | ||||
| @ -676,6 +676,7 @@ static void i915_ring_error_state(struct seq_file *m, | ||||
| 	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]); | ||||
| 	seq_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]); | ||||
| 	if (INTEL_INFO(dev)->gen >= 6) { | ||||
| 		seq_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]); | ||||
| 		seq_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]); | ||||
| 		seq_printf(m, "  SYNC_0: 0x%08x\n", | ||||
| 			   error->semaphore_mboxes[ring][0]); | ||||
| @ -713,6 +714,7 @@ static int i915_error_state(struct seq_file *m, void *unused) | ||||
| 	seq_printf(m, "EIR: 0x%08x\n", error->eir); | ||||
| 	seq_printf(m, "IER: 0x%08x\n", error->ier); | ||||
| 	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); | ||||
| 	seq_printf(m, "CCID: 0x%08x\n", error->ccid); | ||||
| 
 | ||||
| 	for (i = 0; i < dev_priv->num_fence_regs; i++) | ||||
| 		seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]); | ||||
| @ -1764,6 +1766,64 @@ static const struct file_operations i915_max_freq_fops = { | ||||
| 	.llseek = default_llseek, | ||||
| }; | ||||
| 
 | ||||
| static ssize_t | ||||
| i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max, | ||||
| 		   loff_t *ppos) | ||||
| { | ||||
| 	struct drm_device *dev = filp->private_data; | ||||
| 	drm_i915_private_t *dev_priv = dev->dev_private; | ||||
| 	char buf[80]; | ||||
| 	int len; | ||||
| 
 | ||||
| 	len = snprintf(buf, sizeof(buf), | ||||
| 		       "min freq: %d\n", dev_priv->min_delay * 50); | ||||
| 
 | ||||
| 	if (len > sizeof(buf)) | ||||
| 		len = sizeof(buf); | ||||
| 
 | ||||
| 	return simple_read_from_buffer(ubuf, max, ppos, buf, len); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt, | ||||
| 		    loff_t *ppos) | ||||
| { | ||||
| 	struct drm_device *dev = filp->private_data; | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	char buf[20]; | ||||
| 	int val = 1; | ||||
| 
 | ||||
| 	if (cnt > 0) { | ||||
| 		if (cnt > sizeof(buf) - 1) | ||||
| 			return -EINVAL; | ||||
| 
 | ||||
| 		if (copy_from_user(buf, ubuf, cnt)) | ||||
| 			return -EFAULT; | ||||
| 		buf[cnt] = 0; | ||||
| 
 | ||||
| 		val = simple_strtoul(buf, NULL, 0); | ||||
| 	} | ||||
| 
 | ||||
| 	DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Turbo will still be enabled, but won't go below the set value. | ||||
| 	 */ | ||||
| 	dev_priv->min_delay = val / 50; | ||||
| 
 | ||||
| 	gen6_set_rps(dev, val / 50); | ||||
| 
 | ||||
| 	return cnt; | ||||
| } | ||||
| 
 | ||||
| static const struct file_operations i915_min_freq_fops = { | ||||
| 	.owner = THIS_MODULE, | ||||
| 	.open = simple_open, | ||||
| 	.read = i915_min_freq_read, | ||||
| 	.write = i915_min_freq_write, | ||||
| 	.llseek = default_llseek, | ||||
| }; | ||||
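The new i915_min_freq file mirrors the existing i915_max_freq interface: values are read and written in MHz and stored in min_delay in the hardware's 50 MHz units. A hedged userspace sketch of driving it — the path assumes debugfs is mounted at its default location and the device is DRM minor 0:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/i915_min_freq";
	const char *mhz = "650";	/* rounded down to a 50 MHz step */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, mhz, strlen(mhz)) < 0)
		perror("write");
	close(fd);
	return 0;
}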
| 
 | ||||
| static ssize_t | ||||
| i915_cache_sharing_read(struct file *filp, | ||||
| 		   char __user *ubuf, | ||||
| @ -1996,6 +2056,12 @@ int i915_debugfs_init(struct drm_minor *minor) | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 
 | ||||
| 	ret = i915_debugfs_create(minor->debugfs_root, minor, | ||||
| 				  "i915_min_freq", | ||||
| 				  &i915_min_freq_fops); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 
 | ||||
| 	ret = i915_debugfs_create(minor->debugfs_root, minor, | ||||
| 				  "i915_cache_sharing", | ||||
| 				  &i915_cache_sharing_fops); | ||||
| @ -2028,6 +2094,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor) | ||||
| 				 1, minor); | ||||
| 	drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops, | ||||
| 				 1, minor); | ||||
| 	drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops, | ||||
| 				 1, minor); | ||||
| 	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops, | ||||
| 				 1, minor); | ||||
| 	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops, | ||||
|  | ||||
| @ -1006,6 +1006,9 @@ static int i915_getparam(struct drm_device *dev, void *data, | ||||
| 	case I915_PARAM_HAS_ALIASING_PPGTT: | ||||
| 		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0; | ||||
| 		break; | ||||
| 	case I915_PARAM_HAS_WAIT_TIMEOUT: | ||||
| 		value = 1; | ||||
| 		break; | ||||
| 	default: | ||||
| 		DRM_DEBUG_DRIVER("Unknown parameter %d\n", | ||||
| 				 param->param); | ||||
| @ -1082,8 +1085,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data, | ||||
| 
 | ||||
| 	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); | ||||
| 
 | ||||
| 	dev_priv->dri1.gfx_hws_cpu_addr = ioremap_wc(dev->agp->base + hws->addr, | ||||
| 						     4096); | ||||
| 	dev_priv->dri1.gfx_hws_cpu_addr = | ||||
| 		ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096); | ||||
| 	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) { | ||||
| 		i915_dma_cleanup(dev); | ||||
| 		ring->status_page.gfx_addr = 0; | ||||
| @ -1411,7 +1414,7 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) | ||||
| 	if (!ap) | ||||
| 		return; | ||||
| 
 | ||||
| 	ap->ranges[0].base = dev_priv->dev->agp->base; | ||||
| 	ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr; | ||||
| 	ap->ranges[0].size = | ||||
| 		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | ||||
| 	primary = | ||||
| @ -1467,11 +1470,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | ||||
| 		goto free_priv; | ||||
| 	} | ||||
| 
 | ||||
| 	ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL); | ||||
| 	if (!ret) { | ||||
| 		DRM_ERROR("failed to set up gmch\n"); | ||||
| 		ret = -EIO; | ||||
| 		goto put_bridge; | ||||
| 	} | ||||
| 
 | ||||
| 	dev_priv->mm.gtt = intel_gtt_get(); | ||||
| 	if (!dev_priv->mm.gtt) { | ||||
| 		DRM_ERROR("Failed to initialize GTT\n"); | ||||
| 		ret = -ENODEV; | ||||
| 		goto put_bridge; | ||||
| 		goto put_gmch; | ||||
| 	} | ||||
| 
 | ||||
| 	i915_kick_out_firmware_fb(dev_priv); | ||||
| @ -1498,19 +1508,22 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | ||||
| 	if (!dev_priv->regs) { | ||||
| 		DRM_ERROR("failed to map registers\n"); | ||||
| 		ret = -EIO; | ||||
| 		goto put_bridge; | ||||
| 		goto put_gmch; | ||||
| 	} | ||||
| 
 | ||||
| 	aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | ||||
| 	dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr; | ||||
| 
 | ||||
| 	dev_priv->mm.gtt_mapping = | ||||
| 		io_mapping_create_wc(dev->agp->base, aperture_size); | ||||
| 		io_mapping_create_wc(dev_priv->mm.gtt_base_addr, | ||||
| 				     aperture_size); | ||||
| 	if (dev_priv->mm.gtt_mapping == NULL) { | ||||
| 		ret = -EIO; | ||||
| 		goto out_rmmap; | ||||
| 	} | ||||
| 
 | ||||
| 	i915_mtrr_setup(dev_priv, dev->agp->base, aperture_size); | ||||
| 	i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr, | ||||
| 			aperture_size); | ||||
| 
 | ||||
| 	/* The i915 workqueue is primarily used for batched retirement of
 | ||||
| 	 * requests (and thus managing bo) once the task has been completed | ||||
| @ -1534,7 +1547,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | ||||
| 		goto out_mtrrfree; | ||||
| 	} | ||||
| 
 | ||||
| 	/* This must be called before any calls to HAS_PCH_* */ | ||||
| 	intel_detect_pch(dev); | ||||
| 
 | ||||
| 	intel_irq_init(dev); | ||||
| 	intel_gt_init(dev); | ||||
| 
 | ||||
| 	/* Try to make sure MCHBAR is enabled before poking at it */ | ||||
| 	intel_setup_mchbar(dev); | ||||
| @ -1567,7 +1584,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | ||||
| 	if (!IS_I945G(dev) && !IS_I945GM(dev)) | ||||
| 		pci_enable_msi(dev->pdev); | ||||
| 
 | ||||
| 	spin_lock_init(&dev_priv->gt_lock); | ||||
| 	spin_lock_init(&dev_priv->irq_lock); | ||||
| 	spin_lock_init(&dev_priv->error_lock); | ||||
| 	spin_lock_init(&dev_priv->rps_lock); | ||||
| @ -1586,8 +1602,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | ||||
| 	/* Start out suspended */ | ||||
| 	dev_priv->mm.suspended = 1; | ||||
| 
 | ||||
| 	intel_detect_pch(dev); | ||||
| 
 | ||||
| 	if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||||
| 		ret = i915_load_modeset_init(dev); | ||||
| 		if (ret < 0) { | ||||
| @ -1622,13 +1636,16 @@ out_gem_unload: | ||||
| 	destroy_workqueue(dev_priv->wq); | ||||
| out_mtrrfree: | ||||
| 	if (dev_priv->mm.gtt_mtrr >= 0) { | ||||
| 		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, | ||||
| 			 dev->agp->agp_info.aper_size * 1024 * 1024); | ||||
| 		mtrr_del(dev_priv->mm.gtt_mtrr, | ||||
| 			 dev_priv->mm.gtt_base_addr, | ||||
| 			 aperture_size); | ||||
| 		dev_priv->mm.gtt_mtrr = -1; | ||||
| 	} | ||||
| 	io_mapping_free(dev_priv->mm.gtt_mapping); | ||||
| out_rmmap: | ||||
| 	pci_iounmap(dev->pdev, dev_priv->regs); | ||||
| put_gmch: | ||||
| 	intel_gmch_remove(); | ||||
| put_bridge: | ||||
| 	pci_dev_put(dev_priv->bridge_dev); | ||||
| free_priv: | ||||
| @ -1660,8 +1677,9 @@ int i915_driver_unload(struct drm_device *dev) | ||||
| 
 | ||||
| 	io_mapping_free(dev_priv->mm.gtt_mapping); | ||||
| 	if (dev_priv->mm.gtt_mtrr >= 0) { | ||||
| 		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, | ||||
| 			 dev->agp->agp_info.aper_size * 1024 * 1024); | ||||
| 		mtrr_del(dev_priv->mm.gtt_mtrr, | ||||
| 			 dev_priv->mm.gtt_base_addr, | ||||
| 			 dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE); | ||||
| 		dev_priv->mm.gtt_mtrr = -1; | ||||
| 	} | ||||
| 
 | ||||
| @ -1702,6 +1720,7 @@ int i915_driver_unload(struct drm_device *dev) | ||||
| 		mutex_lock(&dev->struct_mutex); | ||||
| 		i915_gem_free_all_phys_object(dev); | ||||
| 		i915_gem_cleanup_ringbuffer(dev); | ||||
| 		i915_gem_context_fini(dev); | ||||
| 		mutex_unlock(&dev->struct_mutex); | ||||
| 		i915_gem_cleanup_aliasing_ppgtt(dev); | ||||
| 		i915_gem_cleanup_stolen(dev); | ||||
| @ -1741,6 +1760,8 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file) | ||||
| 	spin_lock_init(&file_priv->mm.lock); | ||||
| 	INIT_LIST_HEAD(&file_priv->mm.request_list); | ||||
| 
 | ||||
| 	idr_init(&file_priv->context_idr); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| @ -1760,7 +1781,13 @@ void i915_driver_lastclose(struct drm_device * dev) | ||||
| { | ||||
| 	drm_i915_private_t *dev_priv = dev->dev_private; | ||||
| 
 | ||||
| 	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { | ||||
| 	/* On gen6+ we refuse to init without kms enabled, but then the drm core
 | ||||
| 	 * goes right around and calls lastclose. Check for this and don't clean | ||||
| 	 * up anything. */ | ||||
| 	if (!dev_priv) | ||||
| 		return; | ||||
| 
 | ||||
| 	if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||||
| 		intel_fb_restore_mode(dev); | ||||
| 		vga_switcheroo_process_delayed_switch(); | ||||
| 		return; | ||||
| @ -1773,6 +1800,7 @@ void i915_driver_lastclose(struct drm_device * dev) | ||||
| 
 | ||||
| void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) | ||||
| { | ||||
| 	i915_gem_context_close(dev, file_priv); | ||||
| 	i915_gem_release(dev, file_priv); | ||||
| } | ||||
| 
 | ||||
| @ -1826,6 +1854,9 @@ struct drm_ioctl_desc i915_ioctls[] = { | ||||
| 	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | ||||
| 	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | ||||
| 	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | ||||
| 	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED), | ||||
| 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED), | ||||
| 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED), | ||||
| }; | ||||
| 
 | ||||
| int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); | ||||
|  | ||||
| @ -32,6 +32,7 @@ | ||||
| #include "drm.h" | ||||
| #include "i915_drm.h" | ||||
| #include "i915_drv.h" | ||||
| #include "i915_trace.h" | ||||
| #include "intel_drv.h" | ||||
| 
 | ||||
| #include <linux/console.h> | ||||
| @ -215,7 +216,6 @@ static const struct intel_device_info intel_ironlake_d_info = { | ||||
| 	.gen = 5, | ||||
| 	.need_gfx_hws = 1, .has_hotplug = 1, | ||||
| 	.has_bsd_ring = 1, | ||||
| 	.has_pch_split = 1, | ||||
| }; | ||||
| 
 | ||||
| static const struct intel_device_info intel_ironlake_m_info = { | ||||
| @ -223,7 +223,6 @@ static const struct intel_device_info intel_ironlake_m_info = { | ||||
| 	.need_gfx_hws = 1, .has_hotplug = 1, | ||||
| 	.has_fbc = 1, | ||||
| 	.has_bsd_ring = 1, | ||||
| 	.has_pch_split = 1, | ||||
| }; | ||||
| 
 | ||||
| static const struct intel_device_info intel_sandybridge_d_info = { | ||||
| @ -232,7 +231,6 @@ static const struct intel_device_info intel_sandybridge_d_info = { | ||||
| 	.has_bsd_ring = 1, | ||||
| 	.has_blt_ring = 1, | ||||
| 	.has_llc = 1, | ||||
| 	.has_pch_split = 1, | ||||
| 	.has_force_wake = 1, | ||||
| }; | ||||
| 
 | ||||
| @ -243,7 +241,6 @@ static const struct intel_device_info intel_sandybridge_m_info = { | ||||
| 	.has_bsd_ring = 1, | ||||
| 	.has_blt_ring = 1, | ||||
| 	.has_llc = 1, | ||||
| 	.has_pch_split = 1, | ||||
| 	.has_force_wake = 1, | ||||
| }; | ||||
| 
 | ||||
| @ -253,7 +250,6 @@ static const struct intel_device_info intel_ivybridge_d_info = { | ||||
| 	.has_bsd_ring = 1, | ||||
| 	.has_blt_ring = 1, | ||||
| 	.has_llc = 1, | ||||
| 	.has_pch_split = 1, | ||||
| 	.has_force_wake = 1, | ||||
| }; | ||||
| 
 | ||||
| @ -264,7 +260,6 @@ static const struct intel_device_info intel_ivybridge_m_info = { | ||||
| 	.has_bsd_ring = 1, | ||||
| 	.has_blt_ring = 1, | ||||
| 	.has_llc = 1, | ||||
| 	.has_pch_split = 1, | ||||
| 	.has_force_wake = 1, | ||||
| }; | ||||
| 
 | ||||
| @ -292,7 +287,6 @@ static const struct intel_device_info intel_haswell_d_info = { | ||||
| 	.has_bsd_ring = 1, | ||||
| 	.has_blt_ring = 1, | ||||
| 	.has_llc = 1, | ||||
| 	.has_pch_split = 1, | ||||
| 	.has_force_wake = 1, | ||||
| }; | ||||
| 
 | ||||
| @ -302,7 +296,6 @@ static const struct intel_device_info intel_haswell_m_info = { | ||||
| 	.has_bsd_ring = 1, | ||||
| 	.has_blt_ring = 1, | ||||
| 	.has_llc = 1, | ||||
| 	.has_pch_split = 1, | ||||
| 	.has_force_wake = 1, | ||||
| }; | ||||
| 
 | ||||
| @ -358,6 +351,9 @@ static const struct pci_device_id pciidlist[] = {		/* aka */ | ||||
| 	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */ | ||||
| 	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */ | ||||
| 	INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */ | ||||
| 	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), | ||||
| 	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), | ||||
| 	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), | ||||
| 	{0, 0, 0} | ||||
| }; | ||||
| 
 | ||||
| @ -429,135 +425,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) | ||||
| 	return 1; | ||||
| } | ||||
| 
 | ||||
| void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	int count; | ||||
| 
 | ||||
| 	count = 0; | ||||
| 	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) | ||||
| 		udelay(10); | ||||
| 
 | ||||
| 	I915_WRITE_NOTRACE(FORCEWAKE, 1); | ||||
| 	POSTING_READ(FORCEWAKE); | ||||
| 
 | ||||
| 	count = 0; | ||||
| 	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0) | ||||
| 		udelay(10); | ||||
| } | ||||
| 
 | ||||
| void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	int count; | ||||
| 
 | ||||
| 	count = 0; | ||||
| 	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1)) | ||||
| 		udelay(10); | ||||
| 
 | ||||
| 	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); | ||||
| 	POSTING_READ(FORCEWAKE_MT); | ||||
| 
 | ||||
| 	count = 0; | ||||
| 	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0) | ||||
| 		udelay(10); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Generally this is called implicitly by the register read function. However, | ||||
|  * if some sequence requires the GT to not power down then this function should | ||||
|  * be called at the beginning of the sequence followed by a call to | ||||
|  * gen6_gt_force_wake_put() at the end of the sequence. | ||||
|  */ | ||||
| void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	unsigned long irqflags; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&dev_priv->gt_lock, irqflags); | ||||
| 	if (dev_priv->forcewake_count++ == 0) | ||||
| 		dev_priv->display.force_wake_get(dev_priv); | ||||
| 	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); | ||||
| } | ||||
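The comment above documents the usage pattern these helpers implement (they disappear from this file but survive behind the new dev_priv->gt vtable): register reads take forcewake implicitly, while a sequence that must keep the GT awake brackets itself with an explicit, reference-counted get/put pair. A sketch under that assumption — the two registers are stand-ins:

static u32 sample_rp_state(struct drm_i915_private *dev_priv)
{
	u32 a, b;

	gen6_gt_force_wake_get(dev_priv);	/* refcount++ under gt_lock */
	a = I915_READ(GEN6_RP_CUR_UP);		/* stand-in register */
	b = I915_READ(GEN6_RP_PREV_UP);		/* stand-in register */
	gen6_gt_force_wake_put(dev_priv);	/* GT may power down again */

	return a + b;
}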
| 
 | ||||
| static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	u32 gtfifodbg; | ||||
| 	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG); | ||||
| 	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK, | ||||
| 	     "MMIO read or write has been dropped %x\n", gtfifodbg)) | ||||
| 		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); | ||||
| } | ||||
| 
 | ||||
| void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	I915_WRITE_NOTRACE(FORCEWAKE, 0); | ||||
| 	/* The below doubles as a POSTING_READ */ | ||||
| 	gen6_gt_check_fifodbg(dev_priv); | ||||
| } | ||||
| 
 | ||||
| void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); | ||||
| 	/* The below doubles as a POSTING_READ */ | ||||
| 	gen6_gt_check_fifodbg(dev_priv); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * see gen6_gt_force_wake_get() | ||||
|  */ | ||||
| void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	unsigned long irqflags; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&dev_priv->gt_lock, irqflags); | ||||
| 	if (--dev_priv->forcewake_count == 0) | ||||
| 		dev_priv->display.force_wake_put(dev_priv); | ||||
| 	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); | ||||
| } | ||||
| 
 | ||||
| int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	int ret = 0; | ||||
| 
 | ||||
| 	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { | ||||
| 		int loop = 500; | ||||
| 		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||||
| 		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { | ||||
| 			udelay(10); | ||||
| 			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||||
| 		} | ||||
| 		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) | ||||
| 			++ret; | ||||
| 		dev_priv->gt_fifo_count = fifo; | ||||
| 	} | ||||
| 	dev_priv->gt_fifo_count--; | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| void vlv_force_wake_get(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	int count; | ||||
| 
 | ||||
| 	count = 0; | ||||
| 
 | ||||
| 	/* Already awake? */ | ||||
| 	if ((I915_READ(0x130094) & 0xa1) == 0xa1) | ||||
| 		return; | ||||
| 
 | ||||
| 	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff); | ||||
| 	POSTING_READ(FORCEWAKE_VLV); | ||||
| 
 | ||||
| 	count = 0; | ||||
| 	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0) | ||||
| 		udelay(10); | ||||
| } | ||||
| 
 | ||||
| void vlv_force_wake_put(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000); | ||||
| 	/* FIXME: confirm VLV behavior with Punit folks */ | ||||
| 	POSTING_READ(FORCEWAKE_VLV); | ||||
| } | ||||
| 
 | ||||
| static int i915_drm_freeze(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| @ -637,7 +504,7 @@ static int i915_drm_thaw(struct drm_device *dev) | ||||
| 
 | ||||
| 	/* KMS EnterVT equivalent */ | ||||
| 	if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||||
| 		if (HAS_PCH_SPLIT(dev)) | ||||
| 		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) | ||||
| 			ironlake_init_pch_refclk(dev); | ||||
| 
 | ||||
| 		mutex_lock(&dev->struct_mutex); | ||||
| @ -794,9 +661,9 @@ static int gen6_do_reset(struct drm_device *dev) | ||||
| 
 | ||||
| 	/* If reset with a user forcewake, try to restore, otherwise turn it off */ | ||||
| 	if (dev_priv->forcewake_count) | ||||
| 		dev_priv->display.force_wake_get(dev_priv); | ||||
| 		dev_priv->gt.force_wake_get(dev_priv); | ||||
| 	else | ||||
| 		dev_priv->display.force_wake_put(dev_priv); | ||||
| 		dev_priv->gt.force_wake_put(dev_priv); | ||||
| 
 | ||||
| 	/* Restore fifo count */ | ||||
| 	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||||
| @ -805,7 +672,7 @@ static int gen6_do_reset(struct drm_device *dev) | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| static int intel_gpu_reset(struct drm_device *dev) | ||||
| int intel_gpu_reset(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	int ret = -ENODEV; | ||||
| @ -863,10 +730,7 @@ int i915_reset(struct drm_device *dev) | ||||
| 	if (!i915_try_reset) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	if (!mutex_trylock(&dev->struct_mutex)) | ||||
| 		return -EBUSY; | ||||
| 
 | ||||
| 	dev_priv->stop_rings = 0; | ||||
| 	mutex_lock(&dev->struct_mutex); | ||||
| 
 | ||||
| 	i915_gem_reset(dev); | ||||
| 
 | ||||
| @ -909,12 +773,16 @@ int i915_reset(struct drm_device *dev) | ||||
| 		for_each_ring(ring, dev_priv, i) | ||||
| 			ring->init(ring); | ||||
| 
 | ||||
| 		i915_gem_context_init(dev); | ||||
| 		i915_gem_init_ppgtt(dev); | ||||
| 
 | ||||
| 		mutex_unlock(&dev->struct_mutex); | ||||
| 		/*
 | ||||
| 		 * It would make sense to re-init all the other hw state, at | ||||
| 		 * least the rps/rc6/emon init done within modeset_init_hw. For | ||||
| 		 * some unknown reason, this blows up my ilk, so don't. | ||||
| 		 */ | ||||
| 
 | ||||
| 		if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||||
| 			intel_modeset_init_hw(dev); | ||||
| 		mutex_unlock(&dev->struct_mutex); | ||||
| 
 | ||||
| 		drm_irq_uninstall(dev); | ||||
| 		drm_irq_install(dev); | ||||
| @ -925,10 +793,12 @@ int i915_reset(struct drm_device *dev) | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| static int __devinit | ||||
| i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||||
| { | ||||
| 	struct intel_device_info *intel_info = | ||||
| 		(struct intel_device_info *) ent->driver_data; | ||||
| 
 | ||||
| 	/* Only bind to function 0 of the device. Early generations
 | ||||
| 	 * used function 1 as a placeholder for multi-head. This causes | ||||
| 	 * us confusion instead, especially on the systems where both | ||||
| @ -937,6 +807,18 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||||
| 	if (PCI_FUNC(pdev->devfn)) | ||||
| 		return -ENODEV; | ||||
| 
 | ||||
| 	/* We've managed to ship a kms-enabled ddx with an XvMC implementation | ||||
| 	 * for gen3 (and only gen3) that used legacy drm maps | ||||
| 	 * (gasp!) to share buffers between X and the client. Hence we need to | ||||
| 	 * keep around the fake agp stuff for gen3, even when kms is enabled. */ | ||||
| 	if (intel_info->gen != 3) { | ||||
| 		driver.driver_features &= | ||||
| 			~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP); | ||||
| 	} else if (!intel_agp_enabled) { | ||||
| 		DRM_ERROR("drm/i915 can't work without intel_agp module!\n"); | ||||
| 		return -ENODEV; | ||||
| 	} | ||||
| 
 | ||||
| 	return drm_get_pci_dev(pdev, ent, &driver); | ||||
| } | ||||
| 
 | ||||
| @ -1058,7 +940,6 @@ static struct drm_driver driver = { | ||||
| 	.resume = i915_resume, | ||||
| 
 | ||||
| 	.device_is_agp = i915_driver_device_is_agp, | ||||
| 	.reclaim_buffers = drm_core_reclaim_buffers, | ||||
| 	.master_create = i915_master_create, | ||||
| 	.master_destroy = i915_master_destroy, | ||||
| #if defined(CONFIG_DEBUG_FS) | ||||
| @ -1097,11 +978,6 @@ static struct pci_driver i915_pci_driver = { | ||||
| 
 | ||||
| static int __init i915_init(void) | ||||
| { | ||||
| 	if (!intel_agp_enabled) { | ||||
| 		DRM_ERROR("drm/i915 can't work without intel_agp module!\n"); | ||||
| 		return -ENODEV; | ||||
| 	} | ||||
| 
 | ||||
| 	driver.num_ioctls = i915_max_ioctl; | ||||
| 
 | ||||
| 	/*
 | ||||
| @ -1149,6 +1025,84 @@ MODULE_LICENSE("GPL and additional rights"); | ||||
| 	 ((reg) < 0x40000) &&            \ | ||||
| 	 ((reg) != FORCEWAKE)) | ||||
| 
 | ||||
| static bool IS_DISPLAYREG(u32 reg) | ||||
| { | ||||
| 	/*
 | ||||
| 	 * This should make it easier to transition modules over to the | ||||
| 	 * new register block scheme, since we can do it incrementally. | ||||
| 	 */ | ||||
| 	if (reg >= 0x180000) | ||||
| 		return false; | ||||
| 
 | ||||
| 	if (reg >= RENDER_RING_BASE && | ||||
| 	    reg < RENDER_RING_BASE + 0xff) | ||||
| 		return false; | ||||
| 	if (reg >= GEN6_BSD_RING_BASE && | ||||
| 	    reg < GEN6_BSD_RING_BASE + 0xff) | ||||
| 		return false; | ||||
| 	if (reg >= BLT_RING_BASE && | ||||
| 	    reg < BLT_RING_BASE + 0xff) | ||||
| 		return false; | ||||
| 
 | ||||
| 	if (reg == PGTBL_ER) | ||||
| 		return false; | ||||
| 
 | ||||
| 	if (reg >= IPEIR_I965 && | ||||
| 	    reg < HWSTAM) | ||||
| 		return false; | ||||
| 
 | ||||
| 	if (reg == MI_MODE) | ||||
| 		return false; | ||||
| 
 | ||||
| 	if (reg == GFX_MODE_GEN7) | ||||
| 		return false; | ||||
| 
 | ||||
| 	if (reg == RENDER_HWS_PGA_GEN7 || | ||||
| 	    reg == BSD_HWS_PGA_GEN7 || | ||||
| 	    reg == BLT_HWS_PGA_GEN7) | ||||
| 		return false; | ||||
| 
 | ||||
| 	if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL || | ||||
| 	    reg == GEN6_BSD_RNCID) | ||||
| 		return false; | ||||
| 
 | ||||
| 	if (reg == GEN6_BLITTER_ECOSKPD) | ||||
| 		return false; | ||||
| 
 | ||||
| 	if (reg >= 0x4000c && | ||||
| 	    reg <= 0x4002c) | ||||
| 		return false; | ||||
| 
 | ||||
| 	if (reg >= 0x4f000 && | ||||
| 	    reg <= 0x4f08f) | ||||
| 		return false; | ||||
| 
 | ||||
| 	if (reg >= 0x4f100 && | ||||
| 	    reg <= 0x4f11f) | ||||
| 		return false; | ||||
| 
 | ||||
| 	if (reg >= VLV_MASTER_IER && | ||||
| 	    reg <= GEN6_PMIER) | ||||
| 		return false; | ||||
| 
 | ||||
| 	if (reg >= FENCE_REG_SANDYBRIDGE_0 && | ||||
| 	    reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8))) | ||||
| 		return false; | ||||
| 
 | ||||
| 	if (reg >= VLV_IIR_RW && | ||||
| 	    reg <= VLV_ISR) | ||||
| 		return false; | ||||
| 
 | ||||
| 	if (reg == FORCEWAKE_VLV || | ||||
| 	    reg == FORCEWAKE_ACK_VLV) | ||||
| 		return false; | ||||
| 
 | ||||
| 	if (reg == GEN6_GDRST) | ||||
| 		return false; | ||||
| 
 | ||||
| 	return true; | ||||
| } | ||||
| 
 | ||||
| #define __i915_read(x, y) \ | ||||
| u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ | ||||
| 	u##x val = 0; \ | ||||
| @ -1156,11 +1110,13 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ | ||||
| 		unsigned long irqflags; \ | ||||
| 		spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ | ||||
| 		if (dev_priv->forcewake_count == 0) \ | ||||
| 			dev_priv->display.force_wake_get(dev_priv); \ | ||||
| 			dev_priv->gt.force_wake_get(dev_priv); \ | ||||
| 		val = read##y(dev_priv->regs + reg); \ | ||||
| 		if (dev_priv->forcewake_count == 0) \ | ||||
| 			dev_priv->display.force_wake_put(dev_priv); \ | ||||
| 			dev_priv->gt.force_wake_put(dev_priv); \ | ||||
| 		spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ | ||||
| 	} else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \ | ||||
| 		val = read##y(dev_priv->regs + reg + 0x180000);		\ | ||||
| 	} else { \ | ||||
| 		val = read##y(dev_priv->regs + reg); \ | ||||
| 	} \ | ||||
| @ -1181,7 +1137,11 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ | ||||
| 	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | ||||
| 		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ | ||||
| 	} \ | ||||
| 	write##y(val, dev_priv->regs + reg); \ | ||||
| 	if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \ | ||||
| 		write##y(val, dev_priv->regs + reg + 0x180000);		\ | ||||
| 	} else {							\ | ||||
| 		write##y(val, dev_priv->regs + reg);			\ | ||||
| 	}								\ | ||||
| 	if (unlikely(__fifo_ret)) { \ | ||||
| 		gen6_gt_check_fifodbg(dev_priv); \ | ||||
| 	} \ | ||||
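IS_DISPLAYREG() plus these reworked macros implement the transition scheme the comment describes: Valleyview moved the display register block, so legacy offsets are shifted by 0x180000 at I/O time while everything else is untouched. Conceptually, the 32-bit read path expands to something like the following — vlv_read32 is a hypothetical name and the forcewake branch is omitted:

static u32 vlv_read32(struct drm_i915_private *dev_priv, u32 reg)
{
	/* translate a legacy display offset to its new VLV location */
	if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg))
		return readl(dev_priv->regs + reg + 0x180000);
	return readl(dev_priv->regs + reg);
}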
|  | ||||
| @ -79,6 +79,10 @@ enum port { | ||||
| 
 | ||||
| #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) | ||||
| 
 | ||||
| #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ | ||||
| 	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ | ||||
| 		if ((intel_encoder)->base.crtc == (__crtc)) | ||||
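for_each_encoder_on_crtc() folds the common pattern of walking the mode_config encoder list and filtering on the attached CRTC into one iterator. A minimal usage sketch; count_encoders is hypothetical:

static int count_encoders(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct intel_encoder *intel_encoder;
	int n = 0;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		n++;	/* visits only encoders wired to this crtc */

	return n;
}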
| 
 | ||||
| struct intel_pch_pll { | ||||
| 	int refcount; /* count of number of CRTCs sharing this PLL */ | ||||
| 	int active; /* count of number of active CRTCs (i.e. DPMS on) */ | ||||
| @ -176,6 +180,7 @@ struct drm_i915_error_state { | ||||
| 	u32 eir; | ||||
| 	u32 pgtbl_er; | ||||
| 	u32 ier; | ||||
| 	u32 ccid; | ||||
| 	bool waiting[I915_NUM_RINGS]; | ||||
| 	u32 pipestat[I915_MAX_PIPES]; | ||||
| 	u32 tail[I915_NUM_RINGS]; | ||||
| @ -185,6 +190,7 @@ struct drm_i915_error_state { | ||||
| 	u32 instdone[I915_NUM_RINGS]; | ||||
| 	u32 acthd[I915_NUM_RINGS]; | ||||
| 	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1]; | ||||
| 	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */ | ||||
| 	/* our own tracking of ring head and tail */ | ||||
| 	u32 cpu_ring_head[I915_NUM_RINGS]; | ||||
| 	u32 cpu_ring_tail[I915_NUM_RINGS]; | ||||
| @ -261,8 +267,6 @@ struct drm_i915_display_funcs { | ||||
| 			  struct drm_i915_gem_object *obj); | ||||
| 	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||||
| 			    int x, int y); | ||||
| 	void (*force_wake_get)(struct drm_i915_private *dev_priv); | ||||
| 	void (*force_wake_put)(struct drm_i915_private *dev_priv); | ||||
| 	/* clock updates for mode set */ | ||||
| 	/* cursor updates */ | ||||
| 	/* render clock increase/decrease */ | ||||
| @ -270,6 +274,11 @@ struct drm_i915_display_funcs { | ||||
| 	/* pll clock increase/decrease */ | ||||
| }; | ||||
| 
 | ||||
| struct drm_i915_gt_funcs { | ||||
| 	void (*force_wake_get)(struct drm_i915_private *dev_priv); | ||||
| 	void (*force_wake_put)(struct drm_i915_private *dev_priv); | ||||
| }; | ||||
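struct drm_i915_gt_funcs pulls the forcewake callbacks out of the display function table. The intel_gt_init() declared further down is expected to fill it in once per device; a hedged sketch of that selection, with the caveat that the real code may detect multi-threaded forcewake differently:

void intel_gt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* replaces the init removed from i915_driver_load() */
	spin_lock_init(&dev_priv->gt_lock);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->gt.force_wake_get = vlv_force_wake_get;
		dev_priv->gt.force_wake_put = vlv_force_wake_put;
	} else if (INTEL_INFO(dev)->gen >= 7) {
		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
	} else if (INTEL_INFO(dev)->gen == 6) {
		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
	}
}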
| 
 | ||||
| struct intel_device_info { | ||||
| 	u8 gen; | ||||
| 	u8 is_mobile:1; | ||||
| @ -284,7 +293,6 @@ struct intel_device_info { | ||||
| 	u8 is_crestline:1; | ||||
| 	u8 is_ivybridge:1; | ||||
| 	u8 is_valleyview:1; | ||||
| 	u8 has_pch_split:1; | ||||
| 	u8 has_force_wake:1; | ||||
| 	u8 is_haswell:1; | ||||
| 	u8 has_fbc:1; | ||||
| @ -309,6 +317,17 @@ struct i915_hw_ppgtt { | ||||
| 	dma_addr_t scratch_page_dma_addr; | ||||
| }; | ||||
| 
 | ||||
| 
 | ||||
| /* This must match up with the value previously used for execbuf2.rsvd1. */ | ||||
| #define DEFAULT_CONTEXT_ID 0 | ||||
| struct i915_hw_context { | ||||
| 	int id; | ||||
| 	bool is_initialized; | ||||
| 	struct drm_i915_file_private *file_priv; | ||||
| 	struct intel_ring_buffer *ring; | ||||
| 	struct drm_i915_gem_object *obj; | ||||
| }; | ||||
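struct i915_hw_context is the kernel-side bookkeeping; userspace only ever holds the integer id returned by the new create ioctl, with DEFAULT_CONTEXT_ID standing in for "no explicit context" in execbuf2.rsvd1. A hedged userspace sketch, assuming the drm_i915_gem_context_create/destroy structs from the matching i915_drm.h update:

#include <stdio.h>
#include <sys/ioctl.h>
#include <i915_drm.h>	/* assumed to carry the new context ioctls */

int context_roundtrip(int drm_fd)
{
	struct drm_i915_gem_context_create create = { 0 };
	struct drm_i915_gem_context_destroy destroy = { 0 };

	/* fails (e.g. -ENODEV) when hw contexts are unavailable */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
		return -1;

	printf("created context %u\n", create.ctx_id);

	destroy.ctx_id = create.ctx_id;
	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}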
| 
 | ||||
| enum no_fbc_reason { | ||||
| 	FBC_NO_OUTPUT, /* no outputs enabled to compress */ | ||||
| 	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ | ||||
| @ -321,6 +340,7 @@ enum no_fbc_reason { | ||||
| }; | ||||
| 
 | ||||
| enum intel_pch { | ||||
| 	PCH_NONE = 0,	/* No PCH present */ | ||||
| 	PCH_IBX,	/* Ibexpeak PCH */ | ||||
| 	PCH_CPT,	/* Cougarpoint PCH */ | ||||
| 	PCH_LPT,	/* Lynxpoint PCH */ | ||||
| @ -350,6 +370,8 @@ typedef struct drm_i915_private { | ||||
| 	int relative_constants_mode; | ||||
| 
 | ||||
| 	void __iomem *regs; | ||||
| 
 | ||||
| 	struct drm_i915_gt_funcs gt; | ||||
| 	/** gt_fifo_count and the subsequent register write are synchronized
 | ||||
| 	 * with dev->struct_mutex. */ | ||||
| 	unsigned gt_fifo_count; | ||||
| @ -652,11 +674,14 @@ typedef struct drm_i915_private { | ||||
| 		unsigned long gtt_end; | ||||
| 
 | ||||
| 		struct io_mapping *gtt_mapping; | ||||
| 		phys_addr_t gtt_base_addr; | ||||
| 		int gtt_mtrr; | ||||
| 
 | ||||
| 		/** PPGTT used for aliasing the PPGTT with the GTT */ | ||||
| 		struct i915_hw_ppgtt *aliasing_ppgtt; | ||||
| 
 | ||||
| 		u32 *l3_remap_info; | ||||
| 
 | ||||
| 		struct shrinker inactive_shrinker; | ||||
| 
 | ||||
| 		/**
 | ||||
| @ -817,6 +842,10 @@ typedef struct drm_i915_private { | ||||
| 
 | ||||
| 	struct drm_property *broadcast_rgb_property; | ||||
| 	struct drm_property *force_audio_property; | ||||
| 
 | ||||
| 	struct work_struct parity_error_work; | ||||
| 	bool hw_contexts_disabled; | ||||
| 	uint32_t hw_context_size; | ||||
| } drm_i915_private_t; | ||||
| 
 | ||||
| /* Iterate over initialised rings */ | ||||
| @ -1026,6 +1055,7 @@ struct drm_i915_file_private { | ||||
| 		struct spinlock lock; | ||||
| 		struct list_head request_list; | ||||
| 	} mm; | ||||
| 	struct idr context_idr; | ||||
| }; | ||||
| 
 | ||||
| #define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info) | ||||
| @ -1071,7 +1101,8 @@ struct drm_i915_file_private { | ||||
| #define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc) | ||||
| #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws) | ||||
| 
 | ||||
| #define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >=6) | ||||
| #define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6) | ||||
| #define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev)) | ||||
| 
 | ||||
| #define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay) | ||||
| #define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical) | ||||
| @ -1094,13 +1125,13 @@ struct drm_i915_file_private { | ||||
| #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) | ||||
| #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) | ||||
| 
 | ||||
| #define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split) | ||||
| #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) | ||||
| 
 | ||||
| #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) | ||||
| #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) | ||||
| #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) | ||||
| #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) | ||||
| #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) | ||||
| 
 | ||||
| #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) | ||||
| 
 | ||||
| @ -1166,6 +1197,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, | ||||
| extern int i915_emit_box(struct drm_device *dev, | ||||
| 			 struct drm_clip_rect *box, | ||||
| 			 int DR1, int DR4); | ||||
| extern int intel_gpu_reset(struct drm_device *dev); | ||||
| extern int i915_reset(struct drm_device *dev); | ||||
| extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); | ||||
| extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); | ||||
| @ -1178,6 +1210,7 @@ void i915_hangcheck_elapsed(unsigned long data); | ||||
| void i915_handle_error(struct drm_device *dev, bool wedged); | ||||
| 
 | ||||
| extern void intel_irq_init(struct drm_device *dev); | ||||
| extern void intel_gt_init(struct drm_device *dev); | ||||
| 
 | ||||
| void i915_error_state_free(struct kref *error_ref); | ||||
| 
 | ||||
| @ -1237,6 +1270,8 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data, | ||||
| 			struct drm_file *file_priv); | ||||
| int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | ||||
| 				struct drm_file *file_priv); | ||||
| int i915_gem_wait_ioctl(struct drm_device *dev, void *data, | ||||
| 			struct drm_file *file_priv); | ||||
| void i915_gem_load(struct drm_device *dev); | ||||
| int i915_gem_init_object(struct drm_gem_object *obj); | ||||
| int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring, | ||||
| @ -1306,6 +1341,8 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) | ||||
| 
 | ||||
| void i915_gem_retire_requests(struct drm_device *dev); | ||||
| void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); | ||||
| int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv, | ||||
| 				      bool interruptible); | ||||
| 
 | ||||
| void i915_gem_reset(struct drm_device *dev); | ||||
| void i915_gem_clflush_object(struct drm_i915_gem_object *obj); | ||||
| @ -1315,6 +1352,7 @@ int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, | ||||
| int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); | ||||
| int __must_check i915_gem_init(struct drm_device *dev); | ||||
| int __must_check i915_gem_init_hw(struct drm_device *dev); | ||||
| void i915_gem_l3_remap(struct drm_device *dev); | ||||
| void i915_gem_init_swizzling(struct drm_device *dev); | ||||
| void i915_gem_init_ppgtt(struct drm_device *dev); | ||||
| void i915_gem_cleanup_ringbuffer(struct drm_device *dev); | ||||
| @ -1323,8 +1361,8 @@ int __must_check i915_gem_idle(struct drm_device *dev); | ||||
| int __must_check i915_add_request(struct intel_ring_buffer *ring, | ||||
| 				  struct drm_file *file, | ||||
| 				  struct drm_i915_gem_request *request); | ||||
| int __must_check i915_wait_request(struct intel_ring_buffer *ring, | ||||
| 				   uint32_t seqno); | ||||
| int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, | ||||
| 				 uint32_t seqno); | ||||
| int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | ||||
| int __must_check | ||||
| i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, | ||||
| @ -1358,6 +1396,16 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, | ||||
| struct dma_buf *i915_gem_prime_export(struct drm_device *dev, | ||||
| 				struct drm_gem_object *gem_obj, int flags); | ||||
| 
 | ||||
| /* i915_gem_context.c */ | ||||
| void i915_gem_context_init(struct drm_device *dev); | ||||
| void i915_gem_context_fini(struct drm_device *dev); | ||||
| void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); | ||||
| int i915_switch_context(struct intel_ring_buffer *ring, | ||||
| 			struct drm_file *file, int to_id); | ||||
| int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, | ||||
| 				  struct drm_file *file); | ||||
| int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, | ||||
| 				   struct drm_file *file); | ||||
| 
 | ||||
| /* i915_gem_gtt.c */ | ||||
| int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev); | ||||
| @ -1475,20 +1523,12 @@ extern bool intel_fbc_enabled(struct drm_device *dev); | ||||
| extern void intel_disable_fbc(struct drm_device *dev); | ||||
| extern bool ironlake_set_drps(struct drm_device *dev, u8 val); | ||||
| extern void ironlake_init_pch_refclk(struct drm_device *dev); | ||||
| extern void ironlake_enable_rc6(struct drm_device *dev); | ||||
| extern void gen6_set_rps(struct drm_device *dev, u8 val); | ||||
| extern void intel_detect_pch(struct drm_device *dev); | ||||
| extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); | ||||
| extern int intel_enable_rc6(const struct drm_device *dev); | ||||
| 
 | ||||
| extern bool i915_semaphore_is_enabled(struct drm_device *dev); | ||||
| extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); | ||||
| extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv); | ||||
| extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); | ||||
| extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv); | ||||
| 
 | ||||
| extern void vlv_force_wake_get(struct drm_i915_private *dev_priv); | ||||
| extern void vlv_force_wake_put(struct drm_i915_private *dev_priv); | ||||
| 
 | ||||
| /* overlay */ | ||||
| #ifdef CONFIG_DEBUG_FS | ||||
|  | ||||
| @ -96,9 +96,18 @@ i915_gem_wait_for_error(struct drm_device *dev) | ||||
| 	if (!atomic_read(&dev_priv->mm.wedged)) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	ret = wait_for_completion_interruptible(x); | ||||
| 	if (ret) | ||||
| 	/*
 | ||||
| 	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging | ||||
| 	 * userspace. If it takes that long something really bad is going on and | ||||
| 	 * we should simply try to bail out and fail as gracefully as possible. | ||||
| 	 */ | ||||
| 	ret = wait_for_completion_interruptible_timeout(x, 10*HZ); | ||||
| 	if (ret == 0) { | ||||
| 		DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); | ||||
| 		return -EIO; | ||||
| 	} else if (ret < 0) { | ||||
| 		return ret; | ||||
| 	} | ||||
| 
 | ||||
| 	if (atomic_read(&dev_priv->mm.wedged)) { | ||||
| 		/* GPU is hung, bump the completion count to account for
 | ||||
| @ -1122,7 +1131,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||||
| 
 | ||||
| 	obj->fault_mappable = true; | ||||
| 
 | ||||
| 	pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) + | ||||
| 	pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) + | ||||
| 		page_offset; | ||||
| 
 | ||||
| 	/* Finally, remap it using the new GTT offset */ | ||||
| @ -1132,6 +1141,11 @@ unlock: | ||||
| out: | ||||
| 	switch (ret) { | ||||
| 	case -EIO: | ||||
| 		/* If this -EIO is due to a gpu hang, give the reset code a
 | ||||
| 		 * chance to clean up the mess. Otherwise return the proper | ||||
| 		 * SIGBUS. */ | ||||
| 		if (!atomic_read(&dev_priv->mm.wedged)) | ||||
| 			return VM_FAULT_SIGBUS; | ||||
| 	case -EAGAIN: | ||||
| 		/* Give the error handler a chance to run and move the
 | ||||
| 		 * objects off the GPU active list. Next time we service the | ||||
| @ -1568,6 +1582,21 @@ i915_add_request(struct intel_ring_buffer *ring, | ||||
| 	int was_empty; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Emit any outstanding flushes - execbuf can fail to emit the flush | ||||
| 	 * after having emitted the batchbuffer command. Hence we need to fix | ||||
| 	 * things up similar to emitting the lazy request. The difference here | ||||
| 	 * is that the flush _must_ happen before the next request, no matter | ||||
| 	 * what. | ||||
| 	 */ | ||||
| 	if (ring->gpu_caches_dirty) { | ||||
| 		ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS); | ||||
| 		if (ret) | ||||
| 			return ret; | ||||
| 
 | ||||
| 		ring->gpu_caches_dirty = false; | ||||
| 	} | ||||
| 
 | ||||
| 	BUG_ON(request == NULL); | ||||
| 	seqno = i915_gem_next_request_seqno(ring); | ||||
| 
 | ||||
| @ -1613,6 +1642,9 @@ i915_add_request(struct intel_ring_buffer *ring, | ||||
| 			queue_delayed_work(dev_priv->wq, | ||||
| 					   &dev_priv->mm.retire_work, HZ); | ||||
| 	} | ||||
| 
 | ||||
| 	WARN_ON(!list_empty(&ring->gpu_write_list)); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| @ -1827,14 +1859,11 @@ i915_gem_retire_work_handler(struct work_struct *work) | ||||
| 	 */ | ||||
| 	idle = true; | ||||
| 	for_each_ring(ring, dev_priv, i) { | ||||
| 		if (!list_empty(&ring->gpu_write_list)) { | ||||
| 		if (ring->gpu_caches_dirty) { | ||||
| 			struct drm_i915_gem_request *request; | ||||
| 			int ret; | ||||
| 
 | ||||
| 			ret = i915_gem_flush_ring(ring, | ||||
| 						  0, I915_GEM_GPU_DOMAINS); | ||||
| 			request = kzalloc(sizeof(*request), GFP_KERNEL); | ||||
| 			if (ret || request == NULL || | ||||
| 			if (request == NULL || | ||||
| 			    i915_add_request(ring, NULL, request)) | ||||
| 			    kfree(request); | ||||
| 		} | ||||
| @ -1848,11 +1877,10 @@ i915_gem_retire_work_handler(struct work_struct *work) | ||||
| 	mutex_unlock(&dev->struct_mutex); | ||||
| } | ||||
| 
 | ||||
| static int | ||||
| i915_gem_check_wedge(struct drm_i915_private *dev_priv) | ||||
| int | ||||
| i915_gem_check_wedge(struct drm_i915_private *dev_priv, | ||||
| 		     bool interruptible) | ||||
| { | ||||
| 	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); | ||||
| 
 | ||||
| 	if (atomic_read(&dev_priv->mm.wedged)) { | ||||
| 		struct completion *x = &dev_priv->error_completion; | ||||
| 		bool recovery_complete; | ||||
| @ -1863,7 +1891,16 @@ i915_gem_check_wedge(struct drm_i915_private *dev_priv) | ||||
| 		recovery_complete = x->done > 0; | ||||
| 		spin_unlock_irqrestore(&x->wait.lock, flags); | ||||
| 
 | ||||
| 		return recovery_complete ? -EIO : -EAGAIN; | ||||
| 		/* Non-interruptible callers can't handle -EAGAIN, hence return
 | ||||
| 		 * -EIO unconditionally for these. */ | ||||
| 		if (!interruptible) | ||||
| 			return -EIO; | ||||
| 
 | ||||
| 		/* Recovery complete, but still wedged means reset failure. */ | ||||
| 		if (recovery_complete) | ||||
| 			return -EIO; | ||||
| 
 | ||||
| 		return -EAGAIN; | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
| @ -1899,34 +1936,85 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * __wait_seqno - wait until execution of seqno has finished | ||||
|  * @ring: the ring expected to report seqno | ||||
|  * @seqno: the seqno whose completion we are waiting for | ||||
|  * @interruptible: do an interruptible wait (normally yes) | ||||
|  * @timeout: in - how long to wait (NULL forever); out - how much time remaining | ||||
|  * | ||||
|  * Returns 0 if the seqno was found within the allotted time. Else returns the | ||||
|  * errno with remaining time filled in timeout argument. | ||||
|  */ | ||||
| static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | ||||
| 			bool interruptible) | ||||
| 			bool interruptible, struct timespec *timeout) | ||||
| { | ||||
| 	drm_i915_private_t *dev_priv = ring->dev->dev_private; | ||||
| 	int ret = 0; | ||||
| 	struct timespec before, now, wait_time={1,0}; | ||||
| 	unsigned long timeout_jiffies; | ||||
| 	long end; | ||||
| 	bool wait_forever = true; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	if (i915_seqno_passed(ring->get_seqno(ring), seqno)) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	trace_i915_gem_request_wait_begin(ring, seqno); | ||||
| 
 | ||||
| 	if (timeout != NULL) { | ||||
| 		wait_time = *timeout; | ||||
| 		wait_forever = false; | ||||
| 	} | ||||
| 
 | ||||
| 	timeout_jiffies = timespec_to_jiffies(&wait_time); | ||||
| 
 | ||||
| 	if (WARN_ON(!ring->irq_get(ring))) | ||||
| 		return -ENODEV; | ||||
| 
 | ||||
| 	/* Record current time in case interrupted by signal, or wedged */ | ||||
| 	getrawmonotonic(&before); | ||||
| 
 | ||||
| #define EXIT_COND \ | ||||
| 	(i915_seqno_passed(ring->get_seqno(ring), seqno) || \ | ||||
| 	atomic_read(&dev_priv->mm.wedged)) | ||||
| 	do { | ||||
| 		if (interruptible) | ||||
| 			end = wait_event_interruptible_timeout(ring->irq_queue, | ||||
| 							       EXIT_COND, | ||||
| 							       timeout_jiffies); | ||||
| 		else | ||||
| 			end = wait_event_timeout(ring->irq_queue, EXIT_COND, | ||||
| 						 timeout_jiffies); | ||||
| 
 | ||||
| 	if (interruptible) | ||||
| 		ret = wait_event_interruptible(ring->irq_queue, | ||||
| 					       EXIT_COND); | ||||
| 	else | ||||
| 		wait_event(ring->irq_queue, EXIT_COND); | ||||
| 		ret = i915_gem_check_wedge(dev_priv, interruptible); | ||||
| 		if (ret) | ||||
| 			end = ret; | ||||
| 	} while (end == 0 && wait_forever); | ||||
| 
 | ||||
| 	getrawmonotonic(&now); | ||||
| 
 | ||||
| 	ring->irq_put(ring); | ||||
| 	trace_i915_gem_request_wait_end(ring, seqno); | ||||
| #undef EXIT_COND | ||||
| 
 | ||||
| 	return ret; | ||||
| 	if (timeout) { | ||||
| 		struct timespec sleep_time = timespec_sub(now, before); | ||||
| 		*timeout = timespec_sub(*timeout, sleep_time); | ||||
| 	} | ||||
| 
 | ||||
| 	switch (end) { | ||||
| 	case -EIO: | ||||
| 	case -EAGAIN: /* Wedged */ | ||||
| 	case -ERESTARTSYS: /* Signal */ | ||||
| 		return (int)end; | ||||
| 	case 0: /* Timeout */ | ||||
| 		if (timeout) | ||||
| 			set_normalized_timespec(timeout, 0, 0); | ||||
| 		return -ETIME; | ||||
| 	default: /* Completed */ | ||||
| 		WARN_ON(end < 0); /* We're not aware of other errors */ | ||||
| 		return 0; | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
| @ -1934,15 +2022,14 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | ||||
|  * request and object lists appropriately for that event. | ||||
|  */ | ||||
| int | ||||
| i915_wait_request(struct intel_ring_buffer *ring, | ||||
| 		  uint32_t seqno) | ||||
| i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) | ||||
| { | ||||
| 	drm_i915_private_t *dev_priv = ring->dev->dev_private; | ||||
| 	int ret = 0; | ||||
| 
 | ||||
| 	BUG_ON(seqno == 0); | ||||
| 
 | ||||
| 	ret = i915_gem_check_wedge(dev_priv); | ||||
| 	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 
 | ||||
| @ -1950,9 +2037,7 @@ i915_wait_request(struct intel_ring_buffer *ring, | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 
 | ||||
| 	ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible); | ||||
| 	if (atomic_read(&dev_priv->mm.wedged)) | ||||
| 		ret = -EAGAIN; | ||||
| 	ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL); | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| @ -1975,7 +2060,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) | ||||
| 	 * it. | ||||
| 	 */ | ||||
| 	if (obj->active) { | ||||
| 		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno); | ||||
| 		ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno); | ||||
| 		if (ret) | ||||
| 			return ret; | ||||
| 		i915_gem_retire_requests_ring(obj->ring); | ||||
| @ -1984,6 +2069,115 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Ensures that an object will eventually get non-busy by flushing any required | ||||
|  * write domains, emitting any outstanding lazy request and retiring any | ||||
|  * completed requests. | ||||
|  */ | ||||
| static int | ||||
| i915_gem_object_flush_active(struct drm_i915_gem_object *obj) | ||||
| { | ||||
| 	int ret; | ||||
| 
 | ||||
| 	if (obj->active) { | ||||
| 		ret = i915_gem_object_flush_gpu_write_domain(obj); | ||||
| 		if (ret) | ||||
| 			return ret; | ||||
| 
 | ||||
| 		ret = i915_gem_check_olr(obj->ring, | ||||
| 					 obj->last_rendering_seqno); | ||||
| 		if (ret) | ||||
| 			return ret; | ||||
| 		i915_gem_retire_requests_ring(obj->ring); | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT | ||||
|  * @DRM_IOCTL_ARGS: standard ioctl arguments | ||||
|  * | ||||
|  * Returns 0 if successful, else an error is returned with the remaining time in | ||||
|  * the timeout parameter. | ||||
|  *  -ETIME: object is still busy after timeout | ||||
|  *  -ERESTARTSYS: signal interrupted the wait | ||||
|  *  -ENOENT: object doesn't exist | ||||
|  * Also possible, but rare: | ||||
|  *  -EAGAIN: GPU wedged | ||||
|  *  -ENOMEM: allocation failed | ||||
|  *  -ENODEV: Internal IRQ fail | ||||
|  *  -E?: The add request failed | ||||
|  * | ||||
|  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any | ||||
|  * non-zero timeout parameter the wait ioctl will wait for the given number of | ||||
|  * nanoseconds on an object becoming unbusy. Since the wait itself does so | ||||
|  * without holding struct_mutex the object may become re-busied before this | ||||
|  * function completes. A similar but shorter * race condition exists in the busy | ||||
|  * ioctl | ||||
|  */ | ||||
| int | ||||
| i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | ||||
| { | ||||
| 	struct drm_i915_gem_wait *args = data; | ||||
| 	struct drm_i915_gem_object *obj; | ||||
| 	struct intel_ring_buffer *ring = NULL; | ||||
| 	struct timespec timeout_stack, *timeout = NULL; | ||||
| 	u32 seqno = 0; | ||||
| 	int ret = 0; | ||||
| 
 | ||||
| 	if (args->timeout_ns >= 0) { | ||||
| 		timeout_stack = ns_to_timespec(args->timeout_ns); | ||||
| 		timeout = &timeout_stack; | ||||
| 	} | ||||
| 
 | ||||
| 	ret = i915_mutex_lock_interruptible(dev); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 
 | ||||
| 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle)); | ||||
| 	if (&obj->base == NULL) { | ||||
| 		mutex_unlock(&dev->struct_mutex); | ||||
| 		return -ENOENT; | ||||
| 	} | ||||
| 
 | ||||
| 	/* Need to make sure the object gets inactive eventually. */ | ||||
| 	ret = i915_gem_object_flush_active(obj); | ||||
| 	if (ret) | ||||
| 		goto out; | ||||
| 
 | ||||
| 	if (obj->active) { | ||||
| 		seqno = obj->last_rendering_seqno; | ||||
| 		ring = obj->ring; | ||||
| 	} | ||||
| 
 | ||||
| 	if (seqno == 0) | ||||
| 		 goto out; | ||||
| 
 | ||||
| 	/* Do this after OLR check to make sure we make forward progress polling
 | ||||
| 	 * on this IOCTL with a 0 timeout (like the busy ioctl). | ||||
| 	 */ | ||||
| 	if (!args->timeout_ns) { | ||||
| 		ret = -ETIME; | ||||
| 		goto out; | ||||
| 	} | ||||
| 
 | ||||
| 	drm_gem_object_unreference(&obj->base); | ||||
| 	mutex_unlock(&dev->struct_mutex); | ||||
| 
 | ||||
| 	ret = __wait_seqno(ring, seqno, true, timeout); | ||||
| 	if (timeout) { | ||||
| 		WARN_ON(!timespec_valid(timeout)); | ||||
| 		args->timeout_ns = timespec_to_ns(timeout); | ||||
| 	} | ||||
| 	return ret; | ||||
| 
 | ||||
| out: | ||||
| 	drm_gem_object_unreference(&obj->base); | ||||
| 	mutex_unlock(&dev->struct_mutex); | ||||
| 	return ret; | ||||
| } | ||||
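| 
| /* A minimal userspace sketch of the wait ioctl documented above. The | ||||
|  * bo_wait() helper name and the include path are assumptions, not part | ||||
|  * of this patch; error handling is elided. A timeout_ns of 0 behaves | ||||
|  * like the busy ioctl. | ||||
|  */ | ||||
| #include <errno.h> | ||||
| #include <stdint.h> | ||||
| #include <sys/ioctl.h> | ||||
| #include <i915_drm.h> | ||||
| 
| static int bo_wait(int fd, uint32_t handle, int64_t timeout_ns) | ||||
| { | ||||
| 	struct drm_i915_gem_wait wait = { | ||||
| 		.bo_handle = handle, | ||||
| 		.timeout_ns = timeout_ns, | ||||
| 	}; | ||||
| 
| 	if (ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0) | ||||
| 		return 0;	/* object went idle within the timeout */ | ||||
| 	return -errno;		/* -ETIME: still busy; wait.timeout_ns now | ||||
| 				 * holds the remaining time */ | ||||
| } | ||||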
| 
 | ||||
| /**
 | ||||
|  * i915_gem_object_sync - sync an object to a ring. | ||||
|  * | ||||
| @ -2160,7 +2354,7 @@ static int i915_ring_idle(struct intel_ring_buffer *ring) | ||||
| 			return ret; | ||||
| 	} | ||||
| 
 | ||||
| 	return i915_wait_request(ring, i915_gem_next_request_seqno(ring)); | ||||
| 	return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring)); | ||||
| } | ||||
| 
 | ||||
| int i915_gpu_idle(struct drm_device *dev) | ||||
| @ -2178,6 +2372,10 @@ int i915_gpu_idle(struct drm_device *dev) | ||||
| 		/* Is the device fubar? */ | ||||
| 		if (WARN_ON(!list_empty(&ring->gpu_write_list))) | ||||
| 			return -EBUSY; | ||||
| 
 | ||||
| 		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID); | ||||
| 		if (ret) | ||||
| 			return ret; | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
| @ -2364,7 +2562,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) | ||||
| 	} | ||||
| 
 | ||||
| 	if (obj->last_fenced_seqno) { | ||||
| 		ret = i915_wait_request(obj->ring, obj->last_fenced_seqno); | ||||
| 		ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); | ||||
| 		if (ret) | ||||
| 			return ret; | ||||
| 
 | ||||
| @ -2551,8 +2749,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | ||||
| 	if (map_and_fenceable) | ||||
| 		free_space = | ||||
| 			drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, | ||||
| 						    size, alignment, 0, | ||||
| 						    dev_priv->mm.gtt_mappable_end, | ||||
| 						    size, alignment, | ||||
| 						    0, dev_priv->mm.gtt_mappable_end, | ||||
| 						    0); | ||||
| 	else | ||||
| 		free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, | ||||
| @ -2563,7 +2761,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | ||||
| 			obj->gtt_space = | ||||
| 				drm_mm_get_block_range_generic(free_space, | ||||
| 							       size, alignment, 0, | ||||
| 							       dev_priv->mm.gtt_mappable_end, | ||||
| 							       0, dev_priv->mm.gtt_mappable_end, | ||||
| 							       0); | ||||
| 		else | ||||
| 			obj->gtt_space = | ||||
| @ -3030,7 +3228,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) | ||||
| 	if (seqno == 0) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	ret = __wait_seqno(ring, seqno, true); | ||||
| 	ret = __wait_seqno(ring, seqno, true, NULL); | ||||
| 	if (ret == 0) | ||||
| 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); | ||||
| 
 | ||||
| @ -3199,30 +3397,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | ||||
| 	 * become non-busy without any further actions, therefore emit any | ||||
| 	 * necessary flushes here. | ||||
| 	 */ | ||||
| 	ret = i915_gem_object_flush_active(obj); | ||||
| 
 | ||||
| 	args->busy = obj->active; | ||||
| 	if (args->busy) { | ||||
| 		/* Unconditionally flush objects, even when the gpu still uses this
 | ||||
| 		 * object. Userspace calling this function indicates that it wants to | ||||
| 		 * use this buffer rather sooner than later, so issuing the required | ||||
| 		 * flush earlier is beneficial. | ||||
| 		 */ | ||||
| 		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { | ||||
| 			ret = i915_gem_flush_ring(obj->ring, | ||||
| 						  0, obj->base.write_domain); | ||||
| 		} else { | ||||
| 			ret = i915_gem_check_olr(obj->ring, | ||||
| 						 obj->last_rendering_seqno); | ||||
| 		} | ||||
| 
 | ||||
| 		/* Update the active list for the hardware's current position.
 | ||||
| 		 * Otherwise this only updates on a delayed timer or when irqs | ||||
| 		 * are actually unmasked, and our working set ends up being | ||||
| 		 * larger than required. | ||||
| 		 */ | ||||
| 		i915_gem_retire_requests_ring(obj->ring); | ||||
| 
 | ||||
| 		args->busy = obj->active; | ||||
| 	} | ||||
| 
 | ||||
| 	drm_gem_object_unreference(&obj->base); | ||||
| unlock: | ||||
| @ -3435,6 +3612,38 @@ i915_gem_idle(struct drm_device *dev) | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| void i915_gem_l3_remap(struct drm_device *dev) | ||||
| { | ||||
| 	drm_i915_private_t *dev_priv = dev->dev_private; | ||||
| 	u32 misccpctl; | ||||
| 	int i; | ||||
| 
 | ||||
| 	if (!IS_IVYBRIDGE(dev)) | ||||
| 		return; | ||||
| 
 | ||||
| 	if (!dev_priv->mm.l3_remap_info) | ||||
| 		return; | ||||
| 
 | ||||
| 	misccpctl = I915_READ(GEN7_MISCCPCTL); | ||||
| 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); | ||||
| 	POSTING_READ(GEN7_MISCCPCTL); | ||||
| 
 | ||||
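| 	/* mm.l3_remap_info mirrors the GEN7_L3LOG registers as an array of | ||||
| 	 * GEN7_L3LOG_SIZE/4 u32s; i advances in bytes, hence the i/4 index. | ||||
| 	 */ | ||||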
| 	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) { | ||||
| 		u32 remap = I915_READ(GEN7_L3LOG_BASE + i); | ||||
| 		if (remap && remap != dev_priv->mm.l3_remap_info[i/4]) | ||||
| 			DRM_DEBUG("0x%x was already programmed to %x\n", | ||||
| 				  GEN7_L3LOG_BASE + i, remap); | ||||
| 		if (remap && !dev_priv->mm.l3_remap_info[i/4]) | ||||
| 			DRM_DEBUG_DRIVER("Clearing remapped register\n"); | ||||
| 		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]); | ||||
| 	} | ||||
| 
 | ||||
| 	/* Make sure all the writes land before disabling dop clock gating */ | ||||
| 	POSTING_READ(GEN7_L3LOG_BASE); | ||||
| 
 | ||||
| 	I915_WRITE(GEN7_MISCCPCTL, misccpctl); | ||||
| } | ||||
| 
 | ||||
| void i915_gem_init_swizzling(struct drm_device *dev) | ||||
| { | ||||
| 	drm_i915_private_t *dev_priv = dev->dev_private; | ||||
| @ -3518,12 +3727,33 @@ void i915_gem_init_ppgtt(struct drm_device *dev) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static bool | ||||
| intel_enable_blt(struct drm_device *dev) | ||||
| { | ||||
| 	if (!HAS_BLT(dev)) | ||||
| 		return false; | ||||
| 
 | ||||
| 	/* The blitter was dysfunctional on early prototypes */ | ||||
| 	if (IS_GEN6(dev) && dev->pdev->revision < 8) { | ||||
| 		DRM_INFO("BLT not supported on this pre-production hardware;" | ||||
| 			 " graphics performance will be degraded.\n"); | ||||
| 		return false; | ||||
| 	} | ||||
| 
 | ||||
| 	return true; | ||||
| } | ||||
| 
 | ||||
| int | ||||
| i915_gem_init_hw(struct drm_device *dev) | ||||
| { | ||||
| 	drm_i915_private_t *dev_priv = dev->dev_private; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	if (!intel_enable_gtt()) | ||||
| 		return -EIO; | ||||
| 
 | ||||
| 	i915_gem_l3_remap(dev); | ||||
| 
 | ||||
| 	i915_gem_init_swizzling(dev); | ||||
| 
 | ||||
| 	ret = intel_init_render_ring_buffer(dev); | ||||
| @ -3536,7 +3766,7 @@ i915_gem_init_hw(struct drm_device *dev) | ||||
| 			goto cleanup_render_ring; | ||||
| 	} | ||||
| 
 | ||||
| 	if (HAS_BLT(dev)) { | ||||
| 	if (intel_enable_blt(dev)) { | ||||
| 		ret = intel_init_blt_ring_buffer(dev); | ||||
| 		if (ret) | ||||
| 			goto cleanup_bsd_ring; | ||||
| @ -3544,6 +3774,11 @@ i915_gem_init_hw(struct drm_device *dev) | ||||
| 
 | ||||
| 	dev_priv->next_seqno = 1; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * XXX: There was some w/a described somewhere suggesting loading | ||||
| 	 * contexts before PPGTT. | ||||
| 	 */ | ||||
| 	i915_gem_context_init(dev); | ||||
| 	i915_gem_init_ppgtt(dev); | ||||
| 
 | ||||
| 	return 0; | ||||
|  | ||||
							
								
								
									
drivers/gpu/drm/i915/i915_gem_context.c (new file, +536 lines)
| @ -0,0 +1,536 @@ | ||||
| /*
 | ||||
|  * Copyright © 2011-2012 Intel Corporation | ||||
|  * | ||||
|  * Permission is hereby granted, free of charge, to any person obtaining a | ||||
|  * copy of this software and associated documentation files (the "Software"), | ||||
|  * to deal in the Software without restriction, including without limitation | ||||
|  * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||||
|  * and/or sell copies of the Software, and to permit persons to whom the | ||||
|  * Software is furnished to do so, subject to the following conditions: | ||||
|  * | ||||
|  * The above copyright notice and this permission notice (including the next | ||||
|  * paragraph) shall be included in all copies or substantial portions of the | ||||
|  * Software. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
|  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
|  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | ||||
|  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
|  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||||
|  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||||
|  * IN THE SOFTWARE. | ||||
|  * | ||||
|  * Authors: | ||||
|  *    Ben Widawsky <ben@bwidawsk.net> | ||||
|  * | ||||
|  */ | ||||
| 
 | ||||
| /*
 | ||||
|  * This file implements HW context support. On gen5+ a HW context consists of an | ||||
|  * opaque GPU object which is referenced at times of context saves and restores. | ||||
|  * With RC6 enabled, the context is also referenced as the GPU enters and exits | ||||
|  * RC6 (the GPU has its own internal power context, except on gen5). Though | ||||
|  * something like a context does exist for the media ring, the code only | ||||
|  * supports contexts for the render ring. | ||||
|  * | ||||
|  * In software, there is a distinction between contexts created by the user, | ||||
|  * and the default HW context. The default HW context is used by GPU clients | ||||
|  * that do not request setup of their own hardware context. The default | ||||
|  * context's state is never restored to help prevent programming errors. This | ||||
|  * would happen if a client ran and piggy-backed off another client's GPU state. | ||||
|  * The default context only exists to give the GPU some offset to load as the | ||||
|  * current context, in order to invoke a save of the context we actually care about. In fact, the | ||||
|  * code could likely be constructed, albeit in a more complicated fashion, to | ||||
|  * never use the default context, though that limits the driver's ability to | ||||
|  * swap out, and/or destroy other contexts. | ||||
|  * | ||||
|  * All other contexts are created as a request by the GPU client. These contexts | ||||
|  * store GPU state, and thus allow GPU clients to not re-emit state (and | ||||
|  * potentially query certain state) at any time. The kernel driver makes | ||||
|  * certain that the appropriate commands are inserted. | ||||
|  * | ||||
|  * The context life cycle is semi-complicated in that context BOs may live | ||||
|  * longer than the context itself because of the way the hardware, and object | ||||
|  * tracking works. Below is a very crude representation of the state machine | ||||
|  * describing the context life. | ||||
|  *                                         refcount     pincount     active | ||||
|  * S0: initial state                          0            0           0 | ||||
|  * S1: context created                        1            0           0 | ||||
|  * S2: context is currently running           2            1           X | ||||
|  * S3: GPU referenced, but not current        2            0           1 | ||||
|  * S4: context is current, but destroyed      1            1           0 | ||||
|  * S5: like S3, but destroyed                 1            0           1 | ||||
|  * | ||||
|  * The most common (but not all) transitions: | ||||
|  * S0->S1: client creates a context | ||||
|  * S1->S2: client submits execbuf with context | ||||
|  * S2->S3: other clients submits execbuf with context | ||||
|  * S3->S1: context object was retired | ||||
|  * S3->S2: clients submits another execbuf | ||||
|  * S2->S4: context destroy called with current context | ||||
|  * S3->S5->S0: destroy path | ||||
|  * S4->S5->S0: destroy path on current context | ||||
|  * | ||||
|  * There are two confusing terms used above: | ||||
|  *  The "current context" means the context which is currently running on the | ||||
|  *  GPU. The GPU has loaded its state already and has stored away the gtt | ||||
|  *  offset of the BO. The GPU is not actively referencing the data at this | ||||
|  *  offset, but it will on the next context switch. The only way to avoid this | ||||
|  *  is to do a GPU reset. | ||||
|  * | ||||
|  *  An "active context' is one which was previously the "current context" and is | ||||
|  *  on the active list waiting for the next context switch to occur. Until this | ||||
|  *  happens, the object must remain at the same gtt offset. It is therefore | ||||
|  *  possible to destroy a context, but it is still active. | ||||
|  * | ||||
|  */ | ||||
| 
 | ||||
| #include "drmP.h" | ||||
| #include "i915_drm.h" | ||||
| #include "i915_drv.h" | ||||
| 
 | ||||
| /* This is a HW constraint. The value below is the largest known requirement
 | ||||
|  * I've seen in a spec to date, and that was a workaround for a non-shipping | ||||
|  * part. It should be safe to decrease this, but it's more future proof as is. | ||||
|  */ | ||||
| #define CONTEXT_ALIGN (64<<10) | ||||
| 
 | ||||
| static struct i915_hw_context * | ||||
| i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); | ||||
| static int do_switch(struct drm_i915_gem_object *from_obj, | ||||
| 		     struct i915_hw_context *to, u32 seqno); | ||||
| 
 | ||||
| static int get_context_size(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	int ret; | ||||
| 	u32 reg; | ||||
| 
 | ||||
| 	switch (INTEL_INFO(dev)->gen) { | ||||
| 	case 6: | ||||
| 		reg = I915_READ(CXT_SIZE); | ||||
| 		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64; | ||||
| 		break; | ||||
| 	case 7: | ||||
| 		reg = I915_READ(GEN7_CXT_SIZE); | ||||
| 		ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; | ||||
| 		break; | ||||
| 	default: | ||||
| 		BUG(); | ||||
| 	} | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
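| 
| /* Worked example (invented readout, purely illustrative): if a GEN6 | ||||
|  * CXT_SIZE read returned 0x04082105, the fields decode as POWER=4, | ||||
|  * RING=2, RENDER=2, EXTENDED=4 and PIPELINE=5. Each field counts 64-byte | ||||
|  * units, so the total is (4+2+2+4+5) * 64 = 1088 bytes, which | ||||
|  * i915_gem_context_init() below then rounds up to a full 4096-byte page. | ||||
|  */ | ||||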
| 
 | ||||
| static void do_destroy(struct i915_hw_context *ctx) | ||||
| { | ||||
| 	struct drm_device *dev = ctx->obj->base.dev; | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 
 | ||||
| 	if (ctx->file_priv) | ||||
| 		idr_remove(&ctx->file_priv->context_idr, ctx->id); | ||||
| 	else | ||||
| 		BUG_ON(ctx != dev_priv->ring[RCS].default_context); | ||||
| 
 | ||||
| 	drm_gem_object_unreference(&ctx->obj->base); | ||||
| 	kfree(ctx); | ||||
| } | ||||
| 
 | ||||
| static struct i915_hw_context * | ||||
| create_hw_context(struct drm_device *dev, | ||||
| 		  struct drm_i915_file_private *file_priv) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	struct i915_hw_context *ctx; | ||||
| 	int ret, id; | ||||
| 
 | ||||
| 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | ||||
| 	if (ctx == NULL) | ||||
| 		return ERR_PTR(-ENOMEM); | ||||
| 
 | ||||
| 	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size); | ||||
| 	if (ctx->obj == NULL) { | ||||
| 		kfree(ctx); | ||||
| 		DRM_DEBUG_DRIVER("Context object allocated failed\n"); | ||||
| 		return ERR_PTR(-ENOMEM); | ||||
| 	} | ||||
| 
 | ||||
| 	/* The ring associated with the context object is handled by the normal
 | ||||
| 	 * object tracking code. We give an initial ring value simply to pass an | ||||
| 	 * assertion in the context switch code. | ||||
| 	 */ | ||||
| 	ctx->ring = &dev_priv->ring[RCS]; | ||||
| 
 | ||||
| 	/* Default context will never have a file_priv */ | ||||
| 	if (file_priv == NULL) | ||||
| 		return ctx; | ||||
| 
 | ||||
| 	ctx->file_priv = file_priv; | ||||
| 
 | ||||
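| 	/* idr two-step: preload a node with idr_pre_get(), then retry the | ||||
| 	 * allocation if a concurrent allocator raced us (-EAGAIN). | ||||
| 	 */ | ||||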
| again: | ||||
| 	if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) { | ||||
| 		ret = -ENOMEM; | ||||
| 		DRM_DEBUG_DRIVER("idr allocation failed\n"); | ||||
| 		goto err_out; | ||||
| 	} | ||||
| 
 | ||||
| 	ret = idr_get_new_above(&file_priv->context_idr, ctx, | ||||
| 				DEFAULT_CONTEXT_ID + 1, &id); | ||||
| 	if (ret == 0) | ||||
| 		ctx->id = id; | ||||
| 
 | ||||
| 	if (ret == -EAGAIN) | ||||
| 		goto again; | ||||
| 	else if (ret) | ||||
| 		goto err_out; | ||||
| 
 | ||||
| 	return ctx; | ||||
| 
 | ||||
| err_out: | ||||
| 	do_destroy(ctx); | ||||
| 	return ERR_PTR(ret); | ||||
| } | ||||
| 
 | ||||
| static inline bool is_default_context(struct i915_hw_context *ctx) | ||||
| { | ||||
| 	return (ctx == ctx->ring->default_context); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * The default context needs to exist per ring that uses contexts. It stores the | ||||
|  * context state of the GPU for applications that don't utilize HW contexts, as | ||||
|  * well as for the idle case. | ||||
|  */ | ||||
| static int create_default_context(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	struct i915_hw_context *ctx; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); | ||||
| 
 | ||||
| 	ctx = create_hw_context(dev_priv->dev, NULL); | ||||
| 	if (IS_ERR(ctx)) | ||||
| 		return PTR_ERR(ctx); | ||||
| 
 | ||||
| 	/* We may need to do things with the shrinker which require us to
 | ||||
| 	 * immediately switch back to the default context. This can cause a | ||||
| 	 * problem as pinning the default context also requires GTT space which | ||||
| 	 * may not be available. To avoid this we always pin the | ||||
| 	 * default context. | ||||
| 	 */ | ||||
| 	dev_priv->ring[RCS].default_context = ctx; | ||||
| 	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false); | ||||
| 	if (ret) { | ||||
| 		do_destroy(ctx); | ||||
| 		return ret; | ||||
| 	} | ||||
| 
 | ||||
| 	ret = do_switch(NULL, ctx, 0); | ||||
| 	if (ret) { | ||||
| 		i915_gem_object_unpin(ctx->obj); | ||||
| 		do_destroy(ctx); | ||||
| 	} else { | ||||
| 		DRM_DEBUG_DRIVER("Default HW context loaded\n"); | ||||
| 	} | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| void i915_gem_context_init(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	uint32_t ctx_size; | ||||
| 
 | ||||
| 	if (!HAS_HW_CONTEXTS(dev)) { | ||||
| 		dev_priv->hw_contexts_disabled = true; | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	/* If called from reset, or thaw... we've been here already */ | ||||
| 	if (dev_priv->hw_contexts_disabled || | ||||
| 	    dev_priv->ring[RCS].default_context) | ||||
| 		return; | ||||
| 
 | ||||
| 	ctx_size = get_context_size(dev); | ||||
| 	dev_priv->hw_context_size = round_up(ctx_size, 4096); | ||||
| 
 | ||||
| 	if (ctx_size <= 0 || ctx_size > (1<<20)) { | ||||
| 		dev_priv->hw_contexts_disabled = true; | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	if (create_default_context(dev_priv)) { | ||||
| 		dev_priv->hw_contexts_disabled = true; | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	DRM_DEBUG_DRIVER("HW context support initialized\n"); | ||||
| } | ||||
| 
 | ||||
| void i915_gem_context_fini(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 
 | ||||
| 	if (dev_priv->hw_contexts_disabled) | ||||
| 		return; | ||||
| 
 | ||||
| 	/* The only known way to stop the gpu from accessing the hw context is
 | ||||
| 	 * to reset it. Do this as the very last operation to avoid confusing | ||||
| 	 * other code, leading to spurious errors. */ | ||||
| 	intel_gpu_reset(dev); | ||||
| 
 | ||||
| 	i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj); | ||||
| 
 | ||||
| 	do_destroy(dev_priv->ring[RCS].default_context); | ||||
| } | ||||
| 
 | ||||
| static int context_idr_cleanup(int id, void *p, void *data) | ||||
| { | ||||
| 	struct i915_hw_context *ctx = p; | ||||
| 
 | ||||
| 	BUG_ON(id == DEFAULT_CONTEXT_ID); | ||||
| 
 | ||||
| 	do_destroy(ctx); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) | ||||
| { | ||||
| 	struct drm_i915_file_private *file_priv = file->driver_priv; | ||||
| 
 | ||||
| 	mutex_lock(&dev->struct_mutex); | ||||
| 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); | ||||
| 	idr_destroy(&file_priv->context_idr); | ||||
| 	mutex_unlock(&dev->struct_mutex); | ||||
| } | ||||
| 
 | ||||
| static struct i915_hw_context * | ||||
| i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) | ||||
| { | ||||
| 	return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id); | ||||
| } | ||||
| 
 | ||||
| static inline int | ||||
| mi_set_context(struct intel_ring_buffer *ring, | ||||
| 	       struct i915_hw_context *new_context, | ||||
| 	       u32 hw_flags) | ||||
| { | ||||
| 	int ret; | ||||
| 
 | ||||
| 	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
 | ||||
| 	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value | ||||
| 	 * explicitly, so we rely on the value at ring init, stored in | ||||
| 	 * itlb_before_ctx_switch. | ||||
| 	 */ | ||||
| 	if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) { | ||||
| 		ret = ring->flush(ring, 0, 0); | ||||
| 		if (ret) | ||||
| 			return ret; | ||||
| 	} | ||||
| 
 | ||||
| 	ret = intel_ring_begin(ring, 6); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 
 | ||||
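| 	/* w/a (WaProgramMiArbOnOffAroundMiSetContext): on gen7, command | ||||
| 	 * arbitration must be disabled around MI_SET_CONTEXT, presumably so | ||||
| 	 * the switch is not preempted mid save/restore; re-enabled below. | ||||
| 	 */ | ||||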
| 	if (IS_GEN7(ring->dev)) | ||||
| 		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE); | ||||
| 	else | ||||
| 		intel_ring_emit(ring, MI_NOOP); | ||||
| 
 | ||||
| 	intel_ring_emit(ring, MI_NOOP); | ||||
| 	intel_ring_emit(ring, MI_SET_CONTEXT); | ||||
| 	intel_ring_emit(ring, new_context->obj->gtt_offset | | ||||
| 			MI_MM_SPACE_GTT | | ||||
| 			MI_SAVE_EXT_STATE_EN | | ||||
| 			MI_RESTORE_EXT_STATE_EN | | ||||
| 			hw_flags); | ||||
| 	/* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */ | ||||
| 	intel_ring_emit(ring, MI_NOOP); | ||||
| 
 | ||||
| 	if (IS_GEN7(ring->dev)) | ||||
| 		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE); | ||||
| 	else | ||||
| 		intel_ring_emit(ring, MI_NOOP); | ||||
| 
 | ||||
| 	intel_ring_advance(ring); | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| static int do_switch(struct drm_i915_gem_object *from_obj, | ||||
| 		     struct i915_hw_context *to, | ||||
| 		     u32 seqno) | ||||
| { | ||||
| 	struct intel_ring_buffer *ring = NULL; | ||||
| 	u32 hw_flags = 0; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	BUG_ON(to == NULL); | ||||
| 	BUG_ON(from_obj != NULL && from_obj->pin_count == 0); | ||||
| 
 | ||||
| 	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 
 | ||||
| 	/* Clear this page out of any CPU caches for coherent swap-in/out. Note
 | ||||
| 	 * that thanks to write = false in this call and us not setting any gpu | ||||
| 	 * write domains when putting a context object onto the active list | ||||
| 	 * (when switching away from it), this won't block. | ||||
| 	 * XXX: We need a real interface to do this instead of trickery. */ | ||||
| 	ret = i915_gem_object_set_to_gtt_domain(to->obj, false); | ||||
| 	if (ret) { | ||||
| 		i915_gem_object_unpin(to->obj); | ||||
| 		return ret; | ||||
| 	} | ||||
| 
 | ||||
| 	if (!to->obj->has_global_gtt_mapping) | ||||
| 		i915_gem_gtt_bind_object(to->obj, to->obj->cache_level); | ||||
| 
 | ||||
| 	if (!to->is_initialized || is_default_context(to)) | ||||
| 		hw_flags |= MI_RESTORE_INHIBIT; | ||||
| 	else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */ | ||||
| 		hw_flags |= MI_FORCE_RESTORE; | ||||
| 
 | ||||
| 	ring = to->ring; | ||||
| 	ret = mi_set_context(ring, to, hw_flags); | ||||
| 	if (ret) { | ||||
| 		i915_gem_object_unpin(to->obj); | ||||
| 		return ret; | ||||
| 	} | ||||
| 
 | ||||
| 	/* The backing object for the context is done after switching to the
 | ||||
| 	 * *next* context. Therefore we cannot retire the previous context until | ||||
| 	 * the next context has already started running. In fact, the below code | ||||
| 	 * is a bit suboptimal because the retiring can occur simply after the | ||||
| 	 * MI_SET_CONTEXT instead of when the next seqno has completed. | ||||
| 	 */ | ||||
| 	if (from_obj != NULL) { | ||||
| 		from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; | ||||
| 		i915_gem_object_move_to_active(from_obj, ring, seqno); | ||||
| 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 | ||||
| 		 * whole damn pipeline, we don't need to explicitly mark the | ||||
| 		 * object dirty. The only exception is that the context must be | ||||
| 		 * correct in case the object gets swapped out. Ideally we'd be | ||||
| 		 * able to defer doing this until we know the object would be | ||||
| 		 * swapped, but there is no way to do that yet. | ||||
| 		 */ | ||||
| 		from_obj->dirty = 1; | ||||
| 		BUG_ON(from_obj->ring != to->ring); | ||||
| 		i915_gem_object_unpin(from_obj); | ||||
| 
 | ||||
| 		drm_gem_object_unreference(&from_obj->base); | ||||
| 	} | ||||
| 
 | ||||
| 	drm_gem_object_reference(&to->obj->base); | ||||
| 	ring->last_context_obj = to->obj; | ||||
| 	to->is_initialized = true; | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * i915_switch_context() - perform a GPU context switch. | ||||
|  * @ring: ring for which we'll execute the context switch | ||||
|  * @file: drm_file associated with the context, may be NULL | ||||
|  * @to_id: id of the context to switch to | ||||
|  * | ||||
|  * The context life cycle is simple. The context refcount is incremented and | ||||
|  * decremented by 1 on create and destroy. If the context is in use by the GPU, | ||||
|  * it will have a refcount > 1. This allows us to destroy the context abstract | ||||
|  * object while letting the normal object tracking destroy the backing BO. | ||||
|  */ | ||||
| int i915_switch_context(struct intel_ring_buffer *ring, | ||||
| 			struct drm_file *file, | ||||
| 			int to_id) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = ring->dev->dev_private; | ||||
| 	struct drm_i915_file_private *file_priv = NULL; | ||||
| 	struct i915_hw_context *to; | ||||
| 	struct drm_i915_gem_object *from_obj = ring->last_context_obj; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	if (dev_priv->hw_contexts_disabled) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	if (ring != &dev_priv->ring[RCS]) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	if (file) | ||||
| 		file_priv = file->driver_priv; | ||||
| 
 | ||||
| 	if (to_id == DEFAULT_CONTEXT_ID) { | ||||
| 		to = ring->default_context; | ||||
| 	} else { | ||||
| 		to = i915_gem_context_get(file_priv, to_id); | ||||
| 		if (to == NULL) | ||||
| 			return -ENOENT; | ||||
| 	} | ||||
| 
 | ||||
| 	if (from_obj == to->obj) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring)); | ||||
| } | ||||
| 
 | ||||
| int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, | ||||
| 				  struct drm_file *file) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	struct drm_i915_gem_context_create *args = data; | ||||
| 	struct drm_i915_file_private *file_priv = file->driver_priv; | ||||
| 	struct i915_hw_context *ctx; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	if (!(dev->driver->driver_features & DRIVER_GEM)) | ||||
| 		return -ENODEV; | ||||
| 
 | ||||
| 	if (dev_priv->hw_contexts_disabled) | ||||
| 		return -ENODEV; | ||||
| 
 | ||||
| 	ret = i915_mutex_lock_interruptible(dev); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 
 | ||||
| 	ctx = create_hw_context(dev, file_priv); | ||||
| 	mutex_unlock(&dev->struct_mutex); | ||||
| 	if (IS_ERR(ctx)) | ||||
| 		return PTR_ERR(ctx); | ||||
| 
 | ||||
| 	args->ctx_id = ctx->id; | ||||
| 	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
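| 
| /* Userspace sketch (illustrative; error handling elided, libdrm include | ||||
|  * path assumed): create a HW context and run an execbuffer in it. The | ||||
|  * context id travels in execbuffer2's rsvd1 field, which the | ||||
|  * i915_execbuffer2_set_context_id() helper from this patch wraps. | ||||
|  */ | ||||
| struct drm_i915_gem_context_create create = { 0 }; | ||||
| ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create); | ||||
| 
| struct drm_i915_gem_execbuffer2 execbuf = { 0 }; | ||||
| /* ... buffers_ptr, buffer_count, batch_len set up as usual ... */ | ||||
| execbuf.flags = I915_EXEC_RENDER;	/* BSD/BLT rings reject a ctx id */ | ||||
| execbuf.rsvd1 = create.ctx_id; | ||||
| ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf); | ||||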
| 
 | ||||
| int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, | ||||
| 				   struct drm_file *file) | ||||
| { | ||||
| 	struct drm_i915_gem_context_destroy *args = data; | ||||
| 	struct drm_i915_file_private *file_priv = file->driver_priv; | ||||
| 	struct i915_hw_context *ctx; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	if (!(dev->driver->driver_features & DRIVER_GEM)) | ||||
| 		return -ENODEV; | ||||
| 
 | ||||
| 	ret = i915_mutex_lock_interruptible(dev); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 
 | ||||
| 	ctx = i915_gem_context_get(file_priv, args->ctx_id); | ||||
| 	if (!ctx) { | ||||
| 		mutex_unlock(&dev->struct_mutex); | ||||
| 		return -ENOENT; | ||||
| 	} | ||||
| 
 | ||||
| 	do_destroy(ctx); | ||||
| 
 | ||||
| 	mutex_unlock(&dev->struct_mutex); | ||||
| 
 | ||||
| 	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id); | ||||
| 	return 0; | ||||
| } | ||||
| @ -132,7 +132,8 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) | ||||
| 		 __func__, obj, obj->gtt_offset, handle, | ||||
| 		 obj->size / 1024); | ||||
| 
 | ||||
| 	gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size); | ||||
| 	gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset, | ||||
| 			      obj->base.size); | ||||
| 	if (gtt_mapping == NULL) { | ||||
| 		DRM_ERROR("failed to map GTT space\n"); | ||||
| 		return; | ||||
|  | ||||
| @ -78,11 +78,12 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, | ||||
| 
 | ||||
| 	INIT_LIST_HEAD(&unwind_list); | ||||
| 	if (mappable) | ||||
| 		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size, | ||||
| 					    alignment, 0, | ||||
| 					    dev_priv->mm.gtt_mappable_end); | ||||
| 		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, | ||||
| 					    min_size, alignment, 0, | ||||
| 					    0, dev_priv->mm.gtt_mappable_end); | ||||
| 	else | ||||
| 		drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); | ||||
| 		drm_mm_init_scan(&dev_priv->mm.gtt_space, | ||||
| 				 min_size, alignment, 0); | ||||
| 
 | ||||
| 	/* First see if there is a large enough contiguous idle region... */ | ||||
| 	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { | ||||
|  | ||||
| @ -810,33 +810,16 @@ err: | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| static int | ||||
| static void | ||||
| i915_gem_execbuffer_flush(struct drm_device *dev, | ||||
| 			  uint32_t invalidate_domains, | ||||
| 			  uint32_t flush_domains, | ||||
| 			  uint32_t flush_rings) | ||||
| 			  uint32_t flush_domains) | ||||
| { | ||||
| 	drm_i915_private_t *dev_priv = dev->dev_private; | ||||
| 	int i, ret; | ||||
| 
 | ||||
| 	if (flush_domains & I915_GEM_DOMAIN_CPU) | ||||
| 		intel_gtt_chipset_flush(); | ||||
| 
 | ||||
| 	if (flush_domains & I915_GEM_DOMAIN_GTT) | ||||
| 		wmb(); | ||||
| 
 | ||||
| 	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { | ||||
| 		for (i = 0; i < I915_NUM_RINGS; i++) | ||||
| 			if (flush_rings & (1 << i)) { | ||||
| 				ret = i915_gem_flush_ring(&dev_priv->ring[i], | ||||
| 							  invalidate_domains, | ||||
| 							  flush_domains); | ||||
| 				if (ret) | ||||
| 					return ret; | ||||
| 			} | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static int | ||||
| @ -885,12 +868,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, | ||||
| 		i915_gem_object_set_to_gpu_domain(obj, ring, &cd); | ||||
| 
 | ||||
| 	if (cd.invalidate_domains | cd.flush_domains) { | ||||
| 		ret = i915_gem_execbuffer_flush(ring->dev, | ||||
| 						cd.invalidate_domains, | ||||
| 						cd.flush_domains, | ||||
| 						cd.flush_rings); | ||||
| 		if (ret) | ||||
| 			return ret; | ||||
| 		i915_gem_execbuffer_flush(ring->dev, | ||||
| 					  cd.invalidate_domains, | ||||
| 					  cd.flush_domains); | ||||
| 	} | ||||
| 
 | ||||
| 	if (cd.flips) { | ||||
| @ -905,6 +885,16 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, | ||||
| 			return ret; | ||||
| 	} | ||||
| 
 | ||||
| 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 | ||||
| 	 * any residual writes from the previous batch. | ||||
| 	 */ | ||||
| 	ret = i915_gem_flush_ring(ring, | ||||
| 				  I915_GEM_GPU_DOMAINS, | ||||
| 				  ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 
 | ||||
| 	ring->gpu_caches_dirty = false; | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| @ -983,26 +973,13 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev, | ||||
| 				    struct intel_ring_buffer *ring) | ||||
| { | ||||
| 	struct drm_i915_gem_request *request; | ||||
| 	u32 invalidate; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Ensure that the commands in the batch buffer are | ||||
| 	 * finished before the interrupt fires. | ||||
| 	 * | ||||
| 	 * The sampler always gets flushed on i965 (sigh). | ||||
| 	 */ | ||||
| 	invalidate = I915_GEM_DOMAIN_COMMAND; | ||||
| 	if (INTEL_INFO(dev)->gen >= 4) | ||||
| 		invalidate |= I915_GEM_DOMAIN_SAMPLER; | ||||
| 	if (ring->flush(ring, invalidate, 0)) { | ||||
| 		i915_gem_next_request_seqno(ring); | ||||
| 		return; | ||||
| 	} | ||||
| 	/* Unconditionally force add_request to emit a full flush. */ | ||||
| 	ring->gpu_caches_dirty = true; | ||||
| 
 | ||||
| 	/* Add a breadcrumb for the completion of the batch buffer */ | ||||
| 	request = kzalloc(sizeof(*request), GFP_KERNEL); | ||||
| 	if (request == NULL || i915_add_request(ring, file, request)) { | ||||
| 		i915_gem_next_request_seqno(ring); | ||||
| 		kfree(request); | ||||
| 	} | ||||
| } | ||||
| @ -1044,6 +1021,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | ||||
| 	struct drm_i915_gem_object *batch_obj; | ||||
| 	struct drm_clip_rect *cliprects = NULL; | ||||
| 	struct intel_ring_buffer *ring; | ||||
| 	u32 ctx_id = i915_execbuffer2_get_context_id(*args); | ||||
| 	u32 exec_start, exec_len; | ||||
| 	u32 seqno; | ||||
| 	u32 mask; | ||||
| @ -1065,9 +1043,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | ||||
| 		break; | ||||
| 	case I915_EXEC_BSD: | ||||
| 		ring = &dev_priv->ring[VCS]; | ||||
| 		if (ctx_id != 0) { | ||||
| 			DRM_DEBUG("Ring %s doesn't support contexts\n", | ||||
| 				  ring->name); | ||||
| 			return -EPERM; | ||||
| 		} | ||||
| 		break; | ||||
| 	case I915_EXEC_BLT: | ||||
| 		ring = &dev_priv->ring[BCS]; | ||||
| 		if (ctx_id != 0) { | ||||
| 			DRM_DEBUG("Ring %s doesn't support contexts\n", | ||||
| 				  ring->name); | ||||
| 			return -EPERM; | ||||
| 		} | ||||
| 		break; | ||||
| 	default: | ||||
| 		DRM_DEBUG("execbuf with unknown ring: %d\n", | ||||
| @ -1240,6 +1228,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	ret = i915_switch_context(ring, file, ctx_id); | ||||
| 	if (ret) | ||||
| 		goto err; | ||||
| 
 | ||||
| 	if (ring == &dev_priv->ring[RCS] && | ||||
| 	    mode != dev_priv->relative_constants_mode) { | ||||
| 		ret = intel_ring_begin(ring, 4); | ||||
| @ -1367,6 +1359,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | ||||
| 	exec2.num_cliprects = args->num_cliprects; | ||||
| 	exec2.cliprects_ptr = args->cliprects_ptr; | ||||
| 	exec2.flags = I915_EXEC_RENDER; | ||||
| 	i915_execbuffer2_set_context_id(exec2, 0); | ||||
| 
 | ||||
| 	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); | ||||
| 	if (!ret) { | ||||
|  | ||||
| @ -375,6 +375,86 @@ static void gen6_pm_rps_work(struct work_struct *work) | ||||
| 	mutex_unlock(&dev_priv->dev->struct_mutex); | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| /**
 | ||||
|  * ivybridge_parity_work - Workqueue called when a parity error interrupt | ||||
|  * occurred. | ||||
|  * @work: workqueue struct | ||||
|  * | ||||
|  * Doesn't actually do anything except notify userspace. As a consequence of | ||||
|  * this event, userspace should try to remap the bad rows since statistically | ||||
|  * it is likely that the same row will go bad again. | ||||
|  */ | ||||
| static void ivybridge_parity_work(struct work_struct *work) | ||||
| { | ||||
| 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | ||||
| 						    parity_error_work); | ||||
| 	u32 error_status, row, bank, subbank; | ||||
| 	char *parity_event[5]; | ||||
| 	uint32_t misccpctl; | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	/* We must turn off DOP level clock gating to access the L3 registers.
 | ||||
| 	 * In order to prevent a get/put style interface, acquire struct mutex | ||||
| 	 * any time we access those registers. | ||||
| 	 */ | ||||
| 	mutex_lock(&dev_priv->dev->struct_mutex); | ||||
| 
 | ||||
| 	misccpctl = I915_READ(GEN7_MISCCPCTL); | ||||
| 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); | ||||
| 	POSTING_READ(GEN7_MISCCPCTL); | ||||
| 
 | ||||
| 	error_status = I915_READ(GEN7_L3CDERRST1); | ||||
| 	row = GEN7_PARITY_ERROR_ROW(error_status); | ||||
| 	bank = GEN7_PARITY_ERROR_BANK(error_status); | ||||
| 	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); | ||||
| 
 | ||||
| 	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | | ||||
| 				    GEN7_L3CDERRST1_ENABLE); | ||||
| 	POSTING_READ(GEN7_L3CDERRST1); | ||||
| 
 | ||||
| 	I915_WRITE(GEN7_MISCCPCTL, misccpctl); | ||||
| 
 | ||||
| 	spin_lock_irqsave(&dev_priv->irq_lock, flags); | ||||
| 	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; | ||||
| 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||||
| 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | ||||
| 
 | ||||
| 	mutex_unlock(&dev_priv->dev->struct_mutex); | ||||
| 
 | ||||
| 	parity_event[0] = "L3_PARITY_ERROR=1"; | ||||
| 	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); | ||||
| 	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); | ||||
| 	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); | ||||
| 	parity_event[4] = NULL; | ||||
| 
 | ||||
| 	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, | ||||
| 			   KOBJ_CHANGE, parity_event); | ||||
| 
 | ||||
| 	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", | ||||
| 		  row, bank, subbank); | ||||
| 
 | ||||
| 	kfree(parity_event[3]); | ||||
| 	kfree(parity_event[2]); | ||||
| 	kfree(parity_event[1]); | ||||
| } | ||||
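| 
| /* What a userspace listener would see (values invented for illustration): | ||||
|  * the uevent above arrives on the drm device as, e.g., | ||||
|  *   ACTION=change ... L3_PARITY_ERROR=1 ROW=13 BANK=2 SUBBANK=1 | ||||
|  * giving a daemon enough information to schedule the bad row for | ||||
|  * remapping via i915_gem_l3_remap(); the userspace-facing interface for | ||||
|  * that is outside this patch. | ||||
|  */ | ||||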
| 
 | ||||
| static void ivybridge_handle_parity_error(struct drm_device *dev) | ||||
| { | ||||
| 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	if (!IS_IVYBRIDGE(dev)) | ||||
| 		return; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&dev_priv->irq_lock, flags); | ||||
| 	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT; | ||||
| 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||||
| 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | ||||
| 
 | ||||
| 	queue_work(dev_priv->wq, &dev_priv->parity_error_work); | ||||
| } | ||||
| 
 | ||||
| static void snb_gt_irq_handler(struct drm_device *dev, | ||||
| 			       struct drm_i915_private *dev_priv, | ||||
| 			       u32 gt_iir) | ||||
| @ -394,6 +474,9 @@ static void snb_gt_irq_handler(struct drm_device *dev, | ||||
| 		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); | ||||
| 		i915_handle_error(dev, false); | ||||
| 	} | ||||
| 
 | ||||
| 	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT) | ||||
| 		ivybridge_handle_parity_error(dev); | ||||
| } | ||||
| 
 | ||||
| static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, | ||||
| @ -429,15 +512,10 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) | ||||
| 	unsigned long irqflags; | ||||
| 	int pipe; | ||||
| 	u32 pipe_stats[I915_MAX_PIPES]; | ||||
| 	u32 vblank_status; | ||||
| 	int vblank = 0; | ||||
| 	bool blc_event; | ||||
| 
 | ||||
| 	atomic_inc(&dev_priv->irq_received); | ||||
| 
 | ||||
| 	vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS | | ||||
| 		PIPE_VBLANK_INTERRUPT_STATUS; | ||||
| 
 | ||||
| 	while (true) { | ||||
| 		iir = I915_READ(VLV_IIR); | ||||
| 		gt_iir = I915_READ(GTIIR); | ||||
| @ -467,6 +545,16 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) | ||||
| 		} | ||||
| 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||||
| 
 | ||||
| 		for_each_pipe(pipe) { | ||||
| 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) | ||||
| 				drm_handle_vblank(dev, pipe); | ||||
| 
 | ||||
| 			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { | ||||
| 				intel_prepare_page_flip(dev, pipe); | ||||
| 				intel_finish_page_flip(dev, pipe); | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		/* Consume port.  Then clear IIR or we'll miss events */ | ||||
| 		if (iir & I915_DISPLAY_PORT_INTERRUPT) { | ||||
| 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | ||||
| @ -481,19 +569,6 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) | ||||
| 			I915_READ(PORT_HOTPLUG_STAT); | ||||
| 		} | ||||
| 
 | ||||
| 
 | ||||
| 		if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) { | ||||
| 			drm_handle_vblank(dev, 0); | ||||
| 			vblank++; | ||||
| 			intel_finish_page_flip(dev, 0); | ||||
| 		} | ||||
| 
 | ||||
| 		if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) { | ||||
| 			drm_handle_vblank(dev, 1); | ||||
| 			vblank++; | ||||
| 			intel_finish_page_flip(dev, 0); | ||||
| 		} | ||||
| 
 | ||||
| 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) | ||||
| 			blc_event = true; | ||||
| 
 | ||||
| @ -991,6 +1066,7 @@ static void i915_record_ring_state(struct drm_device *dev, | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 
 | ||||
| 	if (INTEL_INFO(dev)->gen >= 6) { | ||||
| 		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); | ||||
| 		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); | ||||
| 		error->semaphore_mboxes[ring->id][0] | ||||
| 			= I915_READ(RING_SYNC_0(ring->mmio_base)); | ||||
| @ -1104,6 +1180,7 @@ static void i915_capture_error_state(struct drm_device *dev) | ||||
| 	kref_init(&error->ref); | ||||
| 	error->eir = I915_READ(EIR); | ||||
| 	error->pgtbl_er = I915_READ(PGTBL_ER); | ||||
| 	error->ccid = I915_READ(CCID); | ||||
| 
 | ||||
| 	if (HAS_PCH_SPLIT(dev)) | ||||
| 		error->ier = I915_READ(DEIER) | I915_READ(GTIER); | ||||
| @ -1426,23 +1503,20 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe) | ||||
| { | ||||
| 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||||
| 	unsigned long irqflags; | ||||
| 	u32 dpfl, imr; | ||||
| 	u32 imr; | ||||
| 
 | ||||
| 	if (!i915_pipe_enabled(dev, pipe)) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||||
| 	dpfl = I915_READ(VLV_DPFLIPSTAT); | ||||
| 	imr = I915_READ(VLV_IMR); | ||||
| 	if (pipe == 0) { | ||||
| 		dpfl |= PIPEA_VBLANK_INT_EN; | ||||
| 	if (pipe == 0) | ||||
| 		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; | ||||
| 	} else { | ||||
| 		dpfl |= PIPEA_VBLANK_INT_EN; | ||||
| 	else | ||||
| 		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | ||||
| 	} | ||||
| 	I915_WRITE(VLV_DPFLIPSTAT, dpfl); | ||||
| 	I915_WRITE(VLV_IMR, imr); | ||||
| 	i915_enable_pipestat(dev_priv, pipe, | ||||
| 			     PIPE_START_VBLANK_INTERRUPT_ENABLE); | ||||
| 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||||
| 
 | ||||
| 	return 0; | ||||
| @ -1492,20 +1566,17 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe) | ||||
| { | ||||
| 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||||
| 	unsigned long irqflags; | ||||
| 	u32 dpfl, imr; | ||||
| 	u32 imr; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||||
| 	dpfl = I915_READ(VLV_DPFLIPSTAT); | ||||
| 	i915_disable_pipestat(dev_priv, pipe, | ||||
| 			      PIPE_START_VBLANK_INTERRUPT_ENABLE); | ||||
| 	imr = I915_READ(VLV_IMR); | ||||
| 	if (pipe == 0) { | ||||
| 		dpfl &= ~PIPEA_VBLANK_INT_EN; | ||||
| 	if (pipe == 0) | ||||
| 		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; | ||||
| 	} else { | ||||
| 		dpfl &= ~PIPEB_VBLANK_INT_EN; | ||||
| 	else | ||||
| 		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | ||||
| 	} | ||||
| 	I915_WRITE(VLV_IMR, imr); | ||||
| 	I915_WRITE(VLV_DPFLIPSTAT, dpfl); | ||||
| 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||||
| } | ||||
| 
 | ||||
| @ -1648,7 +1719,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev) | ||||
| 
 | ||||
| 	atomic_set(&dev_priv->irq_received, 0); | ||||
| 
 | ||||
| 
 | ||||
| 	I915_WRITE(HWSTAM, 0xeffe); | ||||
| 
 | ||||
| 	/* XXX hotplug from PCH */ | ||||
| @ -1811,13 +1881,13 @@ static int ivybridge_irq_postinstall(struct drm_device *dev) | ||||
| 		   DE_PIPEA_VBLANK_IVB); | ||||
| 	POSTING_READ(DEIER); | ||||
| 
 | ||||
| 	dev_priv->gt_irq_mask = ~0; | ||||
| 	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; | ||||
| 
 | ||||
| 	I915_WRITE(GTIIR, I915_READ(GTIIR)); | ||||
| 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||||
| 
 | ||||
| 	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | | ||||
| 		GEN6_BLITTER_USER_INTERRUPT; | ||||
| 		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT; | ||||
| 	I915_WRITE(GTIER, render_irqs); | ||||
| 	POSTING_READ(GTIER); | ||||
| 
 | ||||
| @ -1840,16 +1910,24 @@ static int ivybridge_irq_postinstall(struct drm_device *dev) | ||||
| static int valleyview_irq_postinstall(struct drm_device *dev) | ||||
| { | ||||
| 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||||
| 	u32 render_irqs; | ||||
| 	u32 enable_mask; | ||||
| 	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | ||||
| 	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; | ||||
| 	u16 msid; | ||||
| 
 | ||||
| 	enable_mask = I915_DISPLAY_PORT_INTERRUPT; | ||||
| 	enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | | ||||
| 	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | ||||
| 		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | | ||||
| 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | ||||
| 		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | ||||
| 
 | ||||
| 	dev_priv->irq_mask = ~enable_mask; | ||||
| 	/*
 | ||||
| 	 * Leave vblank interrupts masked initially; enable/disable will | ||||
| 	 * toggle them based on usage. | ||||
| 	 */ | ||||
| 	dev_priv->irq_mask = (~enable_mask) | | ||||
| 		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | | ||||
| 		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | ||||
| 
 | ||||
| 	dev_priv->pipestat[0] = 0; | ||||
| 	dev_priv->pipestat[1] = 0; | ||||
| @ -1868,26 +1946,27 @@ static int valleyview_irq_postinstall(struct drm_device *dev) | ||||
| 	I915_WRITE(PIPESTAT(1), 0xffff); | ||||
| 	POSTING_READ(VLV_IER); | ||||
| 
 | ||||
| 	i915_enable_pipestat(dev_priv, 0, pipestat_enable); | ||||
| 	i915_enable_pipestat(dev_priv, 1, pipestat_enable); | ||||
| 
 | ||||
| 	I915_WRITE(VLV_IIR, 0xffffffff); | ||||
| 	I915_WRITE(VLV_IIR, 0xffffffff); | ||||
| 
 | ||||
| 	render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT | | ||||
| 		GT_GEN6_BLT_CS_ERROR_INTERRUPT | | ||||
| 		GT_GEN6_BLT_USER_INTERRUPT | | ||||
| 		GT_GEN6_BSD_USER_INTERRUPT | | ||||
| 		GT_GEN6_BSD_CS_ERROR_INTERRUPT | | ||||
| 		GT_GEN7_L3_PARITY_ERROR_INTERRUPT | | ||||
| 		GT_PIPE_NOTIFY | | ||||
| 		GT_RENDER_CS_ERROR_INTERRUPT | | ||||
| 		GT_SYNC_STATUS | | ||||
| 		GT_USER_INTERRUPT; | ||||
| 
 | ||||
| 	dev_priv->gt_irq_mask = ~render_irqs; | ||||
| 	dev_priv->gt_irq_mask = ~0; | ||||
| 
 | ||||
| 	I915_WRITE(GTIIR, I915_READ(GTIIR)); | ||||
| 	I915_WRITE(GTIIR, I915_READ(GTIIR)); | ||||
| 	I915_WRITE(GTIMR, 0); | ||||
| 	I915_WRITE(GTIER, render_irqs); | ||||
| 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||||
| 	I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT | | ||||
| 		   GT_GEN6_BLT_CS_ERROR_INTERRUPT | | ||||
| 		   GT_GEN6_BLT_USER_INTERRUPT | | ||||
| 		   GT_GEN6_BSD_USER_INTERRUPT | | ||||
| 		   GT_GEN6_BSD_CS_ERROR_INTERRUPT | | ||||
| 		   GT_GEN7_L3_PARITY_ERROR_INTERRUPT | | ||||
| 		   GT_PIPE_NOTIFY | | ||||
| 		   GT_RENDER_CS_ERROR_INTERRUPT | | ||||
| 		   GT_SYNC_STATUS | | ||||
| 		   GT_USER_INTERRUPT); | ||||
| 	POSTING_READ(GTIER); | ||||
| 
 | ||||
| 	/* ack & enable invalid PTE error interrupts */ | ||||
| @ -2166,9 +2245,9 @@ static int i915_irq_postinstall(struct drm_device *dev) | ||||
| 			hotplug_en |= HDMIC_HOTPLUG_INT_EN; | ||||
| 		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | ||||
| 			hotplug_en |= HDMID_HOTPLUG_INT_EN; | ||||
| 		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) | ||||
| 		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) | ||||
| 			hotplug_en |= SDVOC_HOTPLUG_INT_EN; | ||||
| 		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) | ||||
| 		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) | ||||
| 			hotplug_en |= SDVOB_HOTPLUG_INT_EN; | ||||
| 		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { | ||||
| 			hotplug_en |= CRT_HOTPLUG_INT_EN; | ||||
| @ -2328,10 +2407,8 @@ static void i965_irq_preinstall(struct drm_device * dev) | ||||
| 
 | ||||
| 	atomic_set(&dev_priv->irq_received, 0); | ||||
| 
 | ||||
| 	if (I915_HAS_HOTPLUG(dev)) { | ||||
| 		I915_WRITE(PORT_HOTPLUG_EN, 0); | ||||
| 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | ||||
| 	} | ||||
| 	I915_WRITE(PORT_HOTPLUG_EN, 0); | ||||
| 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | ||||
| 
 | ||||
| 	I915_WRITE(HWSTAM, 0xeffe); | ||||
| 	for_each_pipe(pipe) | ||||
| @ -2344,11 +2421,13 @@ static void i965_irq_preinstall(struct drm_device * dev) | ||||
| static int i965_irq_postinstall(struct drm_device *dev) | ||||
| { | ||||
| 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||||
| 	u32 hotplug_en; | ||||
| 	u32 enable_mask; | ||||
| 	u32 error_mask; | ||||
| 
 | ||||
| 	/* Unmask the interrupts that we always want on. */ | ||||
| 	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | | ||||
| 			       I915_DISPLAY_PORT_INTERRUPT | | ||||
| 			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | ||||
| 			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | ||||
| 			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | ||||
| @ -2364,13 +2443,6 @@ static int i965_irq_postinstall(struct drm_device *dev) | ||||
| 	dev_priv->pipestat[0] = 0; | ||||
| 	dev_priv->pipestat[1] = 0; | ||||
| 
 | ||||
| 	if (I915_HAS_HOTPLUG(dev)) { | ||||
| 		/* Enable in IER... */ | ||||
| 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | ||||
| 		/* and unmask in IMR */ | ||||
| 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; | ||||
| 	} | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Enable some error detection, note the instruction error mask | ||||
| 	 * bit is reserved, so we leave it masked. | ||||
| @ -2390,36 +2462,40 @@ static int i965_irq_postinstall(struct drm_device *dev) | ||||
| 	I915_WRITE(IER, enable_mask); | ||||
| 	POSTING_READ(IER); | ||||
| 
 | ||||
| 	if (I915_HAS_HOTPLUG(dev)) { | ||||
| 		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | ||||
| 
 | ||||
| 		/* Note HDMI and DP share bits */ | ||||
| 		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) | ||||
| 			hotplug_en |= HDMIB_HOTPLUG_INT_EN; | ||||
| 		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | ||||
| 			hotplug_en |= HDMIC_HOTPLUG_INT_EN; | ||||
| 		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | ||||
| 			hotplug_en |= HDMID_HOTPLUG_INT_EN; | ||||
| 		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) | ||||
| 	/* Note HDMI and DP share hotplug bits */ | ||||
| 	hotplug_en = 0; | ||||
| 	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) | ||||
| 		hotplug_en |= HDMIB_HOTPLUG_INT_EN; | ||||
| 	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | ||||
| 		hotplug_en |= HDMIC_HOTPLUG_INT_EN; | ||||
| 	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | ||||
| 		hotplug_en |= HDMID_HOTPLUG_INT_EN; | ||||
| 	if (IS_G4X(dev)) { | ||||
| 		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X) | ||||
| 			hotplug_en |= SDVOC_HOTPLUG_INT_EN; | ||||
| 		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) | ||||
| 		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X) | ||||
| 			hotplug_en |= SDVOB_HOTPLUG_INT_EN; | ||||
| 	} else { | ||||
| 		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965) | ||||
| 			hotplug_en |= SDVOC_HOTPLUG_INT_EN; | ||||
| 		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965) | ||||
| 			hotplug_en |= SDVOB_HOTPLUG_INT_EN; | ||||
| 		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { | ||||
| 			hotplug_en |= CRT_HOTPLUG_INT_EN; | ||||
| 
 | ||||
| 			/* Programming the CRT detection parameters tends
 | ||||
| 			   to generate a spurious hotplug event about three | ||||
| 			   seconds later.  So just do it once. | ||||
| 			*/ | ||||
| 			if (IS_G4X(dev)) | ||||
| 				hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; | ||||
| 			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; | ||||
| 		} | ||||
| 
 | ||||
| 		/* Ignore TV since it's buggy */ | ||||
| 
 | ||||
| 		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | ||||
| 	} | ||||
| 	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { | ||||
| 		hotplug_en |= CRT_HOTPLUG_INT_EN; | ||||
| 
 | ||||
| 		/* Programming the CRT detection parameters tends
 | ||||
| 		   to generate a spurious hotplug event about three | ||||
| 		   seconds later.  So just do it once. | ||||
| 		   */ | ||||
| 		if (IS_G4X(dev)) | ||||
| 			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; | ||||
| 		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; | ||||
| 	} | ||||
| 
 | ||||
| 	/* Ignore TV since it's buggy */ | ||||
| 
 | ||||
| 	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | ||||
| 
 | ||||
| 	intel_opregion_enable_asle(dev); | ||||
| 
 | ||||
| @ -2477,8 +2553,7 @@ static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS) | ||||
| 		ret = IRQ_HANDLED; | ||||
| 
 | ||||
| 		/* Consume port.  Then clear IIR or we'll miss events */ | ||||
| 		if ((I915_HAS_HOTPLUG(dev)) && | ||||
| 		    (iir & I915_DISPLAY_PORT_INTERRUPT)) { | ||||
| 		if (iir & I915_DISPLAY_PORT_INTERRUPT) { | ||||
| 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | ||||
| 
 | ||||
| 			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | ||||
| @ -2551,10 +2626,8 @@ static void i965_irq_uninstall(struct drm_device * dev) | ||||
| 	if (!dev_priv) | ||||
| 		return; | ||||
| 
 | ||||
| 	if (I915_HAS_HOTPLUG(dev)) { | ||||
| 		I915_WRITE(PORT_HOTPLUG_EN, 0); | ||||
| 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | ||||
| 	} | ||||
| 	I915_WRITE(PORT_HOTPLUG_EN, 0); | ||||
| 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | ||||
| 
 | ||||
| 	I915_WRITE(HWSTAM, 0xffffffff); | ||||
| 	for_each_pipe(pipe) | ||||
| @ -2575,6 +2648,7 @@ void intel_irq_init(struct drm_device *dev) | ||||
| 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); | ||||
| 	INIT_WORK(&dev_priv->error_work, i915_error_work_func); | ||||
| 	INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work); | ||||
| 	INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work); | ||||
| 
 | ||||
| 	dev->driver->get_vblank_counter = i915_get_vblank_counter; | ||||
| 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | ||||
|  | ||||
| @ -217,6 +217,9 @@ | ||||
| #define   MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) | ||||
| #define   MI_DISPLAY_FLIP_IVB_PLANE_C  (4 << 19) | ||||
| #define   MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) | ||||
| #define MI_ARB_ON_OFF		MI_INSTR(0x08, 0) | ||||
| #define   MI_ARB_ENABLE			(1<<0) | ||||
| #define   MI_ARB_DISABLE		(0<<0) | ||||
| 
 | ||||
| #define MI_SET_CONTEXT		MI_INSTR(0x18, 0) | ||||
| #define   MI_MM_SPACE_GTT		(1<<8) | ||||
| @ -299,6 +302,7 @@ | ||||
| #define   DISPLAY_PLANE_B           (1<<20) | ||||
| #define GFX_OP_PIPE_CONTROL(len)	((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2)) | ||||
| #define   PIPE_CONTROL_CS_STALL				(1<<20) | ||||
| #define   PIPE_CONTROL_TLB_INVALIDATE			(1<<18) | ||||
| #define   PIPE_CONTROL_QW_WRITE				(1<<14) | ||||
| #define   PIPE_CONTROL_DEPTH_STALL			(1<<13) | ||||
| #define   PIPE_CONTROL_WRITE_FLUSH			(1<<12) | ||||
| @ -686,10 +690,10 @@ | ||||
| #define   GEN6_BLITTER_FBC_NOTIFY			(1<<3) | ||||
| 
 | ||||
| #define GEN6_BSD_SLEEP_PSMI_CONTROL	0x12050 | ||||
| #define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK	(1 << 16) | ||||
| #define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE		(1 << 0) | ||||
| #define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE		0 | ||||
| #define   GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR			(1 << 3) | ||||
| #define   GEN6_BSD_SLEEP_MSG_DISABLE	(1 << 0) | ||||
| #define   GEN6_BSD_SLEEP_FLUSH_DISABLE	(1 << 2) | ||||
| #define   GEN6_BSD_SLEEP_INDICATOR	(1 << 3) | ||||
| #define   GEN6_BSD_GO_INDICATOR		(1 << 4) | ||||
| 
 | ||||
| #define GEN6_BSD_HWSTAM			0x12098 | ||||
| #define GEN6_BSD_IMR			0x120a8 | ||||
| @ -908,6 +912,7 @@ | ||||
| #define   DPLL_P2_CLOCK_DIV_MASK	0x03000000 /* i915 */ | ||||
| #define   DPLL_FPA01_P1_POST_DIV_MASK	0x00ff0000 /* i915 */ | ||||
| #define   DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW	0x00ff8000 /* Pineview */ | ||||
| #define   DPLL_LOCK_VLV			(1<<15) | ||||
| #define   DPLL_INTEGRATED_CLOCK_VLV	(1<<13) | ||||
| 
 | ||||
| #define SRX_INDEX		0x3c4 | ||||
| @ -1453,6 +1458,10 @@ | ||||
| #define DDRMPLL1		0X12c20 | ||||
| #define PEG_BAND_GAP_DATA	0x14d68 | ||||
| 
 | ||||
| #define GEN6_GT_THREAD_STATUS_REG 0x13805c | ||||
| #define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7 | ||||
| #define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16)) | ||||
| 
 | ||||
| #define GEN6_GT_PERF_STATUS	0x145948 | ||||
| #define GEN6_RP_STATE_LIMITS	0x145994 | ||||
| #define GEN6_RP_STATE_CAP	0x145998 | ||||
| @ -1462,6 +1471,31 @@ | ||||
|  */ | ||||
| #define CCID			0x2180 | ||||
| #define   CCID_EN		(1<<0) | ||||
| #define CXT_SIZE		0x21a0 | ||||
| #define GEN6_CXT_POWER_SIZE(cxt_reg)	((cxt_reg >> 24) & 0x3f) | ||||
| #define GEN6_CXT_RING_SIZE(cxt_reg)	((cxt_reg >> 18) & 0x3f) | ||||
| #define GEN6_CXT_RENDER_SIZE(cxt_reg)	((cxt_reg >> 12) & 0x3f) | ||||
| #define GEN6_CXT_EXTENDED_SIZE(cxt_reg)	((cxt_reg >> 6) & 0x3f) | ||||
| #define GEN6_CXT_PIPELINE_SIZE(cxt_reg)	((cxt_reg >> 0) & 0x3f) | ||||
| #define GEN6_CXT_TOTAL_SIZE(cxt_reg)	(GEN6_CXT_POWER_SIZE(cxt_reg) + \ | ||||
| 					GEN6_CXT_RING_SIZE(cxt_reg) + \ | ||||
| 					GEN6_CXT_RENDER_SIZE(cxt_reg) + \ | ||||
| 					GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ | ||||
| 					GEN6_CXT_PIPELINE_SIZE(cxt_reg)) | ||||
| #define GEN7_CXT_SIZE		0x21a8 | ||||
| #define GEN7_CXT_POWER_SIZE(ctx_reg)	((ctx_reg >> 25) & 0x7f) | ||||
| #define GEN7_CXT_RING_SIZE(ctx_reg)	((ctx_reg >> 22) & 0x7) | ||||
| #define GEN7_CXT_RENDER_SIZE(ctx_reg)	((ctx_reg >> 16) & 0x3f) | ||||
| #define GEN7_CXT_EXTENDED_SIZE(ctx_reg)	((ctx_reg >> 9) & 0x7f) | ||||
| #define GEN7_CXT_GT1_SIZE(ctx_reg)	((ctx_reg >> 6) & 0x7) | ||||
| #define GEN7_CXT_VFSTATE_SIZE(ctx_reg)	((ctx_reg >> 0) & 0x3f) | ||||
| #define GEN7_CXT_TOTAL_SIZE(ctx_reg)	(GEN7_CXT_POWER_SIZE(ctx_reg) + \ | ||||
| 					 GEN7_CXT_RING_SIZE(ctx_reg) + \ | ||||
| 					 GEN7_CXT_RENDER_SIZE(ctx_reg) + \ | ||||
| 					 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ | ||||
| 					 GEN7_CXT_GT1_SIZE(ctx_reg) + \ | ||||
| 					 GEN7_CXT_VFSTATE_SIZE(ctx_reg)) | ||||
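/*
 * Editor's sketch (not part of this commit): a standalone illustration of
 * how the GEN7_CXT_* field macros above carve a raw GEN7_CXT_SIZE read
 * into per-area sizes. The register value below is invented purely for
 * illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define GEN7_CXT_POWER_SIZE(ctx_reg)	((ctx_reg >> 25) & 0x7f)
#define GEN7_CXT_RING_SIZE(ctx_reg)	((ctx_reg >> 22) & 0x7)
#define GEN7_CXT_RENDER_SIZE(ctx_reg)	((ctx_reg >> 16) & 0x3f)
#define GEN7_CXT_EXTENDED_SIZE(ctx_reg)	((ctx_reg >> 9) & 0x7f)
#define GEN7_CXT_GT1_SIZE(ctx_reg)	((ctx_reg >> 6) & 0x7)
#define GEN7_CXT_VFSTATE_SIZE(ctx_reg)	((ctx_reg >> 0) & 0x3f)

int main(void)
{
	uint32_t ctx_reg = 0x2e45a1d3;	/* hypothetical raw register read */

	printf("power %u ring %u render %u ext %u gt1 %u vfstate %u\n",
	       GEN7_CXT_POWER_SIZE(ctx_reg), GEN7_CXT_RING_SIZE(ctx_reg),
	       GEN7_CXT_RENDER_SIZE(ctx_reg), GEN7_CXT_EXTENDED_SIZE(ctx_reg),
	       GEN7_CXT_GT1_SIZE(ctx_reg), GEN7_CXT_VFSTATE_SIZE(ctx_reg));
	return 0;
}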
| 
 | ||||
| /*
 | ||||
|  * Overlay regs | ||||
|  */ | ||||
| @ -1566,20 +1600,34 @@ | ||||
| #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV	(1 << 2) | ||||
| 
 | ||||
| #define PORT_HOTPLUG_STAT	0x61114 | ||||
| #define   HDMIB_HOTPLUG_INT_STATUS		(1 << 29) | ||||
| #define   DPB_HOTPLUG_INT_STATUS		(1 << 29) | ||||
| #define   HDMIC_HOTPLUG_INT_STATUS		(1 << 28) | ||||
| #define   DPC_HOTPLUG_INT_STATUS		(1 << 28) | ||||
| #define   HDMID_HOTPLUG_INT_STATUS		(1 << 27) | ||||
| #define   DPD_HOTPLUG_INT_STATUS		(1 << 27) | ||||
| /* HDMI/DP bits are gen4+ */ | ||||
| #define   DPB_HOTPLUG_LIVE_STATUS               (1 << 29) | ||||
| #define   DPC_HOTPLUG_LIVE_STATUS               (1 << 28) | ||||
| #define   DPD_HOTPLUG_LIVE_STATUS               (1 << 27) | ||||
| #define   DPD_HOTPLUG_INT_STATUS		(3 << 21) | ||||
| #define   DPC_HOTPLUG_INT_STATUS		(3 << 19) | ||||
| #define   DPB_HOTPLUG_INT_STATUS		(3 << 17) | ||||
| /* HDMI bits are shared with the DP bits */ | ||||
| #define   HDMIB_HOTPLUG_LIVE_STATUS             (1 << 29) | ||||
| #define   HDMIC_HOTPLUG_LIVE_STATUS             (1 << 28) | ||||
| #define   HDMID_HOTPLUG_LIVE_STATUS             (1 << 27) | ||||
| #define   HDMID_HOTPLUG_INT_STATUS		(3 << 21) | ||||
| #define   HDMIC_HOTPLUG_INT_STATUS		(3 << 19) | ||||
| #define   HDMIB_HOTPLUG_INT_STATUS		(3 << 17) | ||||
| /* CRT/TV common between gen3+ */ | ||||
| #define   CRT_HOTPLUG_INT_STATUS		(1 << 11) | ||||
| #define   TV_HOTPLUG_INT_STATUS			(1 << 10) | ||||
| #define   CRT_HOTPLUG_MONITOR_MASK		(3 << 8) | ||||
| #define   CRT_HOTPLUG_MONITOR_COLOR		(3 << 8) | ||||
| #define   CRT_HOTPLUG_MONITOR_MONO		(2 << 8) | ||||
| #define   CRT_HOTPLUG_MONITOR_NONE		(0 << 8) | ||||
| #define   SDVOC_HOTPLUG_INT_STATUS		(1 << 7) | ||||
| #define   SDVOB_HOTPLUG_INT_STATUS		(1 << 6) | ||||
| /* SDVO is different across gen3/4 */ | ||||
| #define   SDVOC_HOTPLUG_INT_STATUS_G4X		(1 << 3) | ||||
| #define   SDVOB_HOTPLUG_INT_STATUS_G4X		(1 << 2) | ||||
| #define   SDVOC_HOTPLUG_INT_STATUS_I965		(3 << 4) | ||||
| #define   SDVOB_HOTPLUG_INT_STATUS_I965		(3 << 2) | ||||
| #define   SDVOC_HOTPLUG_INT_STATUS_I915		(1 << 7) | ||||
| #define   SDVOB_HOTPLUG_INT_STATUS_I915		(1 << 6) | ||||
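/*
 * Editor's sketch (not from this commit): per the defines above, the gen4+
 * DP/HDMI hotplug *interrupt* status is a two-bit pulse field while the
 * live state is a single bit, so a handler would test them separately.
 * The PORT_HOTPLUG_STAT value here is hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define DPB_HOTPLUG_LIVE_STATUS	(1u << 29)
#define DPB_HOTPLUG_INT_STATUS	(3u << 17)

int main(void)
{
	uint32_t stat = (1u << 29) | (2u << 17);	/* hypothetical read */

	if (stat & DPB_HOTPLUG_INT_STATUS)
		printf("port B hotplug event, pulse bits 0x%x\n",
		       (stat & DPB_HOTPLUG_INT_STATUS) >> 17);
	printf("port B live state: %s\n",
	       (stat & DPB_HOTPLUG_LIVE_STATUS) ? "connected" : "disconnected");
	return 0;
}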
| 
 | ||||
| /* SDVO port control */ | ||||
| #define SDVOB			0x61140 | ||||
| @ -1711,8 +1759,10 @@ | ||||
| #define   VIDEO_DIP_PORT_C		(2 << 29) | ||||
| #define   VIDEO_DIP_PORT_D		(3 << 29) | ||||
| #define   VIDEO_DIP_PORT_MASK		(3 << 29) | ||||
| #define   VIDEO_DIP_ENABLE_GCP		(1 << 25) | ||||
| #define   VIDEO_DIP_ENABLE_AVI		(1 << 21) | ||||
| #define   VIDEO_DIP_ENABLE_VENDOR	(2 << 21) | ||||
| #define   VIDEO_DIP_ENABLE_GAMUT	(4 << 21) | ||||
| #define   VIDEO_DIP_ENABLE_SPD		(8 << 21) | ||||
| #define   VIDEO_DIP_SELECT_AVI		(0 << 19) | ||||
| #define   VIDEO_DIP_SELECT_VENDOR	(1 << 19) | ||||
| @ -1723,7 +1773,11 @@ | ||||
| #define   VIDEO_DIP_FREQ_2VSYNC		(2 << 16) | ||||
| #define   VIDEO_DIP_FREQ_MASK		(3 << 16) | ||||
| /* HSW and later: */ | ||||
| #define   VIDEO_DIP_ENABLE_VSC_HSW	(1 << 20) | ||||
| #define   VIDEO_DIP_ENABLE_GCP_HSW	(1 << 16) | ||||
| #define   VIDEO_DIP_ENABLE_AVI_HSW	(1 << 12) | ||||
| #define   VIDEO_DIP_ENABLE_VS_HSW	(1 << 8) | ||||
| #define   VIDEO_DIP_ENABLE_GMP_HSW	(1 << 4) | ||||
| #define   VIDEO_DIP_ENABLE_SPD_HSW	(1 << 0) | ||||
| 
 | ||||
| /* Panel power sequencing */ | ||||
| @ -1795,18 +1849,35 @@ | ||||
| #define PFIT_AUTO_RATIOS 0x61238 | ||||
| 
 | ||||
| /* Backlight control */ | ||||
| #define BLC_PWM_CTL		0x61254 | ||||
| #define   BACKLIGHT_MODULATION_FREQ_SHIFT		(17) | ||||
| #define BLC_PWM_CTL2		0x61250 /* 965+ only */ | ||||
| #define   BLM_COMBINATION_MODE (1 << 30) | ||||
| #define   BLM_PWM_ENABLE		(1 << 31) | ||||
| #define   BLM_COMBINATION_MODE		(1 << 30) /* gen4 only */ | ||||
| #define   BLM_PIPE_SELECT		(1 << 29) | ||||
| #define   BLM_PIPE_SELECT_IVB		(3 << 29) | ||||
| #define   BLM_PIPE_A			(0 << 29) | ||||
| #define   BLM_PIPE_B			(1 << 29) | ||||
| #define   BLM_PIPE_C			(2 << 29) /* ivb + */ | ||||
| #define   BLM_PIPE(pipe)		((pipe) << 29) | ||||
| #define   BLM_POLARITY_I965		(1 << 28) /* gen4 only */ | ||||
| #define   BLM_PHASE_IN_INTERUPT_STATUS	(1 << 26) | ||||
| #define   BLM_PHASE_IN_ENABLE		(1 << 25) | ||||
| #define   BLM_PHASE_IN_INTERUPT_ENABL	(1 << 24) | ||||
| #define   BLM_PHASE_IN_TIME_BASE_SHIFT	(16) | ||||
| #define   BLM_PHASE_IN_TIME_BASE_MASK	(0xff << 16) | ||||
| #define   BLM_PHASE_IN_COUNT_SHIFT	(8) | ||||
| #define   BLM_PHASE_IN_COUNT_MASK	(0xff << 8) | ||||
| #define   BLM_PHASE_IN_INCR_SHIFT	(0) | ||||
| #define   BLM_PHASE_IN_INCR_MASK	(0xff << 0) | ||||
| #define BLC_PWM_CTL		0x61254 | ||||
| /*
 | ||||
|  * These are the most significant 15 bits of the number of backlight cycles in a | ||||
|  * complete cycle of the modulated backlight control. | ||||
|  * | ||||
|  * The actual value is this field multiplied by two. | ||||
|  */ | ||||
| #define   BACKLIGHT_MODULATION_FREQ_MASK		(0x7fff << 17) | ||||
| #define   BLM_LEGACY_MODE				(1 << 16) | ||||
| #define   BACKLIGHT_MODULATION_FREQ_SHIFT	(17) | ||||
| #define   BACKLIGHT_MODULATION_FREQ_MASK	(0x7fff << 17) | ||||
| #define   BLM_LEGACY_MODE			(1 << 16) /* gen2 only */ | ||||
| /*
 | ||||
|  * This is the number of cycles out of the backlight modulation cycle for which | ||||
|  * the backlight is on. | ||||
| @ -1816,9 +1887,24 @@ | ||||
|  */ | ||||
| #define   BACKLIGHT_DUTY_CYCLE_SHIFT		(0) | ||||
| #define   BACKLIGHT_DUTY_CYCLE_MASK		(0xffff) | ||||
| #define   BACKLIGHT_DUTY_CYCLE_MASK_PNV		(0xfffe) | ||||
| #define   BLM_POLARITY_PNV			(1 << 0) /* pnv only */ | ||||
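/*
 * Editor's sketch (not part of this commit): decoding BLC_PWM_CTL per the
 * comments above -- the 15-bit frequency field holds half the modulation
 * period in backlight cycles, and the low 16 bits hold the on-time. The
 * raw value is made up for illustration.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t blc_pwm_ctl = (0x1000u << 17) | 0x0800;	/* hypothetical */
	uint32_t period = ((blc_pwm_ctl >> 17) & 0x7fff) * 2;	/* field is x2 */
	uint32_t duty = blc_pwm_ctl & 0xffff;

	printf("period %u cycles, on for %u cycles (%u%%)\n",
	       period, duty, duty * 100 / period);
	return 0;
}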
| 
 | ||||
| #define BLC_HIST_CTL		0x61260 | ||||
| 
 | ||||
| /* New registers for PCH-split platforms. Safe where new bits show up; the
 | ||||
|  * register layout matches gen4 BLC_PWM_CTL[12]. */ | ||||
| #define BLC_PWM_CPU_CTL2	0x48250 | ||||
| #define BLC_PWM_CPU_CTL		0x48254 | ||||
| 
 | ||||
| /* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
 | ||||
|  * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */ | ||||
| #define BLC_PWM_PCH_CTL1	0xc8250 | ||||
| #define   BLM_PCH_PWM_ENABLE			(1 << 31) | ||||
| #define   BLM_PCH_OVERRIDE_ENABLE		(1 << 30) | ||||
| #define   BLM_PCH_POLARITY			(1 << 29) | ||||
| #define BLC_PWM_PCH_CTL2	0xc8254 | ||||
| 
 | ||||
| /* TV port control */ | ||||
| #define TV_CTL			0x68000 | ||||
| /** Enables the TV encoder */ | ||||
| @ -2583,13 +2669,13 @@ | ||||
| #define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) | ||||
| 
 | ||||
| #define VLV_DPFLIPSTAT				0x70028 | ||||
| #define   PIPEB_LINE_COMPARE_STATUS		(1<<29) | ||||
| #define   PIPEB_LINE_COMPARE_INT_EN		(1<<29) | ||||
| #define   PIPEB_HLINE_INT_EN			(1<<28) | ||||
| #define   PIPEB_VBLANK_INT_EN			(1<<27) | ||||
| #define   SPRITED_FLIPDONE_INT_EN		(1<<26) | ||||
| #define   SPRITEC_FLIPDONE_INT_EN		(1<<25) | ||||
| #define   PLANEB_FLIPDONE_INT_EN		(1<<24) | ||||
| #define   PIPEA_LINE_COMPARE_STATUS		(1<<21) | ||||
| #define   PIPEA_LINE_COMPARE_INT_EN		(1<<21) | ||||
| #define   PIPEA_HLINE_INT_EN			(1<<20) | ||||
| #define   PIPEA_VBLANK_INT_EN			(1<<19) | ||||
| #define   SPRITEB_FLIPDONE_INT_EN		(1<<18) | ||||
| @ -2897,13 +2983,14 @@ | ||||
| #define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE) | ||||
| #define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) | ||||
| #define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) | ||||
| #define DSPLINOFF(plane) DSPADDR(plane) | ||||
| 
 | ||||
| /* Display/Sprite base address macros */ | ||||
| #define DISP_BASEADDR_MASK	(0xfffff000) | ||||
| #define I915_LO_DISPBASE(val)	(val & ~DISP_BASEADDR_MASK) | ||||
| #define I915_HI_DISPBASE(val)	(val & DISP_BASEADDR_MASK) | ||||
| #define I915_MODIFY_DISPBASE(reg, gfx_addr) \ | ||||
| 		(I915_WRITE(reg, gfx_addr | I915_LO_DISPBASE(I915_READ(reg)))) | ||||
| 		(I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg)))) | ||||
| 
 | ||||
| /* VBIOS flags */ | ||||
| #define SWF00			0x71410 | ||||
| @ -3771,6 +3858,9 @@ | ||||
| #define _FDI_RXA_TUSIZE2         0xf0038 | ||||
| #define _FDI_RXB_TUSIZE1         0xf1030 | ||||
| #define _FDI_RXB_TUSIZE2         0xf1038 | ||||
| #define  FDI_RX_TP1_TO_TP2_48	(2<<20) | ||||
| #define  FDI_RX_TP1_TO_TP2_64	(3<<20) | ||||
| #define  FDI_RX_FDI_DELAY_90	(0x90<<0) | ||||
| #define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) | ||||
| #define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) | ||||
| #define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) | ||||
| @ -3824,7 +3914,6 @@ | ||||
| #define  ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) | ||||
| 
 | ||||
| /* or SDVOB */ | ||||
| #define VLV_HDMIB 0x61140 | ||||
| #define HDMIB   0xe1140 | ||||
| #define  PORT_ENABLE    (1 << 31) | ||||
| #define  TRANSCODER(pipe)       ((pipe) << 30) | ||||
| @ -3855,20 +3944,18 @@ | ||||
| #define PCH_LVDS	0xe1180 | ||||
| #define  LVDS_DETECTED	(1 << 1) | ||||
| 
 | ||||
| #define BLC_PWM_CPU_CTL2	0x48250 | ||||
| #define  PWM_ENABLE		(1 << 31) | ||||
| #define  PWM_PIPE_A		(0 << 29) | ||||
| #define  PWM_PIPE_B		(1 << 29) | ||||
| #define BLC_PWM_CPU_CTL		0x48254 | ||||
| /* vlv has 2 sets of panel control regs. */ | ||||
| #define PIPEA_PP_STATUS         0x61200 | ||||
| #define PIPEA_PP_CONTROL        0x61204 | ||||
| #define PIPEA_PP_ON_DELAYS      0x61208 | ||||
| #define PIPEA_PP_OFF_DELAYS     0x6120c | ||||
| #define PIPEA_PP_DIVISOR        0x61210 | ||||
| 
 | ||||
| #define BLC_PWM_PCH_CTL1	0xc8250 | ||||
| #define  PWM_PCH_ENABLE		(1 << 31) | ||||
| #define  PWM_POLARITY_ACTIVE_LOW	(1 << 29) | ||||
| #define  PWM_POLARITY_ACTIVE_HIGH	(0 << 29) | ||||
| #define  PWM_POLARITY_ACTIVE_LOW2	(1 << 28) | ||||
| #define  PWM_POLARITY_ACTIVE_HIGH2	(0 << 28) | ||||
| 
 | ||||
| #define BLC_PWM_PCH_CTL2	0xc8254 | ||||
| #define PIPEB_PP_STATUS         0x61300 | ||||
| #define PIPEB_PP_CONTROL        0x61304 | ||||
| #define PIPEB_PP_ON_DELAYS      0x61308 | ||||
| #define PIPEB_PP_OFF_DELAYS     0x6130c | ||||
| #define PIPEB_PP_DIVISOR        0x61310 | ||||
| 
 | ||||
| #define PCH_PP_STATUS		0xc7200 | ||||
| #define PCH_PP_CONTROL		0xc7204 | ||||
| @ -3992,6 +4079,7 @@ | ||||
| #define  FORCEWAKE				0xA18C | ||||
| #define  FORCEWAKE_VLV				0x1300b0 | ||||
| #define  FORCEWAKE_ACK_VLV			0x1300b4 | ||||
| #define  FORCEWAKE_ACK_HSW			0x130044 | ||||
| #define  FORCEWAKE_ACK				0x130090 | ||||
| #define  FORCEWAKE_MT				0xa188 /* multi-threaded */ | ||||
| #define  FORCEWAKE_MT_ACK			0x130040 | ||||
| @ -4012,10 +4100,15 @@ | ||||
| # define GEN6_CSUNIT_CLOCK_GATE_DISABLE			(1 << 7) | ||||
| 
 | ||||
| #define GEN6_UCGCTL2				0x9404 | ||||
| # define GEN7_VDSUNIT_CLOCK_GATE_DISABLE		(1 << 30) | ||||
| # define GEN7_TDLUNIT_CLOCK_GATE_DISABLE		(1 << 22) | ||||
| # define GEN6_RCZUNIT_CLOCK_GATE_DISABLE		(1 << 13) | ||||
| # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE		(1 << 12) | ||||
| # define GEN6_RCCUNIT_CLOCK_GATE_DISABLE		(1 << 11) | ||||
| 
 | ||||
| #define GEN7_UCGCTL4				0x940c | ||||
| #define  GEN7_L3BANK2X_CLOCK_GATE_DISABLE	(1<<25) | ||||
| 
 | ||||
| #define GEN6_RPNSWREQ				0xA008 | ||||
| #define   GEN6_TURBO_DISABLE			(1<<31) | ||||
| #define   GEN6_FREQUENCY(x)			((x)<<25) | ||||
| @ -4047,6 +4140,7 @@ | ||||
| #define   GEN6_RP_UP_IDLE_MIN			(0x1<<3) | ||||
| #define   GEN6_RP_UP_BUSY_AVG			(0x2<<3) | ||||
| #define   GEN6_RP_UP_BUSY_CONT			(0x4<<3) | ||||
| #define   GEN7_RP_DOWN_IDLE_AVG			(0x2<<0) | ||||
| #define   GEN6_RP_DOWN_IDLE_CONT		(0x1<<0) | ||||
| #define GEN6_RP_UP_THRESHOLD			0xA02C | ||||
| #define GEN6_RP_DOWN_THRESHOLD			0xA030 | ||||
| @ -4111,6 +4205,26 @@ | ||||
| #define   GEN6_RC6			3 | ||||
| #define   GEN6_RC7			4 | ||||
| 
 | ||||
| #define GEN7_MISCCPCTL			(0x9424) | ||||
| #define   GEN7_DOP_CLOCK_GATE_ENABLE	(1<<0) | ||||
| 
 | ||||
| /* IVYBRIDGE DPF */ | ||||
| #define GEN7_L3CDERRST1			0xB008 /* L3CD Error Status 1 */ | ||||
| #define   GEN7_L3CDERRST1_ROW_MASK	(0x7ff<<14) | ||||
| #define   GEN7_PARITY_ERROR_VALID	(1<<13) | ||||
| #define   GEN7_L3CDERRST1_BANK_MASK	(3<<11) | ||||
| #define   GEN7_L3CDERRST1_SUBBANK_MASK	(7<<8) | ||||
| #define GEN7_PARITY_ERROR_ROW(reg) \ | ||||
| 		((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14) | ||||
| #define GEN7_PARITY_ERROR_BANK(reg) \ | ||||
| 		((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11) | ||||
| #define GEN7_PARITY_ERROR_SUBBANK(reg) \ | ||||
| 		((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8) | ||||
| #define   GEN7_L3CDERRST1_ENABLE	(1<<7) | ||||
| 
 | ||||
| #define GEN7_L3LOG_BASE			0xB070 | ||||
| #define GEN7_L3LOG_SIZE			0x80 | ||||
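/*
 * Editor's sketch (assumption, not in this commit): decoding a raw
 * GEN7_L3CDERRST1 read with the row/bank/subbank macros above, roughly as
 * the ivybridge parity work handler would. The register value is invented.
 */
#include <stdio.h>
#include <stdint.h>

#define GEN7_L3CDERRST1_ROW_MASK	(0x7ffu << 14)
#define GEN7_PARITY_ERROR_VALID		(1u << 13)
#define GEN7_L3CDERRST1_BANK_MASK	(3u << 11)
#define GEN7_L3CDERRST1_SUBBANK_MASK	(7u << 8)

int main(void)
{
	uint32_t reg = (42u << 14) | (1u << 13) | (2u << 11) | (5u << 8);

	if (reg & GEN7_PARITY_ERROR_VALID)
		printf("L3 parity error: row %u, bank %u, subbank %u\n",
		       (reg & GEN7_L3CDERRST1_ROW_MASK) >> 14,
		       (reg & GEN7_L3CDERRST1_BANK_MASK) >> 11,
		       (reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8);
	return 0;
}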
| 
 | ||||
| #define G4X_AUD_VID_DID			0x62020 | ||||
| #define INTEL_AUDIO_DEVCL		0x808629FB | ||||
| #define INTEL_AUDIO_DEVBLC		0x80862801 | ||||
| @ -4177,7 +4291,7 @@ | ||||
| 					PIPE_DDI_FUNC_CTL_B) | ||||
| #define  PIPE_DDI_FUNC_ENABLE		(1<<31) | ||||
| /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ | ||||
| #define  PIPE_DDI_PORT_MASK				(0xf<<28) | ||||
| #define  PIPE_DDI_PORT_MASK			(7<<28) | ||||
| #define  PIPE_DDI_SELECT_PORT(x)		((x)<<28) | ||||
| #define  PIPE_DDI_MODE_SELECT_HDMI		(0<<24) | ||||
| #define  PIPE_DDI_MODE_SELECT_DVI		(1<<24) | ||||
| @ -4335,7 +4449,7 @@ | ||||
| #define PIPE_WM_LINETIME_B		0x45274 | ||||
| #define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \ | ||||
| 					PIPE_WM_LINETIME_A, \ | ||||
| 					PIPE_WM_LINETIME_A) | ||||
| 					PIPE_WM_LINETIME_B) | ||||
| #define   PIPE_WM_LINETIME_MASK		(0x1ff) | ||||
| #define   PIPE_WM_LINETIME_TIME(x)			((x)) | ||||
| #define   PIPE_WM_LINETIME_IPS_LINETIME_MASK	(0x1ff<<16) | ||||
| @ -4347,4 +4461,9 @@ | ||||
| #define  SFUSE_STRAP_DDIC_DETECTED	(1<<1) | ||||
| #define  SFUSE_STRAP_DDID_DETECTED	(1<<0) | ||||
| 
 | ||||
| #define WM_DBG				0x45280 | ||||
| #define  WM_DBG_DISALLOW_MULTIPLE_LP	(1<<0) | ||||
| #define  WM_DBG_DISALLOW_MAXFIFO	(1<<1) | ||||
| #define  WM_DBG_DISALLOW_SPRITE		(1<<2) | ||||
| 
 | ||||
| #endif /* _I915_REG_H_ */ | ||||
|  | ||||
| @ -828,10 +828,7 @@ int i915_save_state(struct drm_device *dev) | ||||
| 		dev_priv->saveIMR = I915_READ(IMR); | ||||
| 	} | ||||
| 
 | ||||
| 	if (IS_IRONLAKE_M(dev)) | ||||
| 		ironlake_disable_drps(dev); | ||||
| 	if (INTEL_INFO(dev)->gen >= 6) | ||||
| 		gen6_disable_rps(dev); | ||||
| 	intel_disable_gt_powersave(dev); | ||||
| 
 | ||||
| 	/* Cache mode state */ | ||||
| 	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); | ||||
|  | ||||
| @ -29,6 +29,7 @@ | ||||
| #include <linux/module.h> | ||||
| #include <linux/stat.h> | ||||
| #include <linux/sysfs.h> | ||||
| #include "intel_drv.h" | ||||
| #include "i915_drv.h" | ||||
| 
 | ||||
| static u32 calc_residency(struct drm_device *dev, const u32 reg) | ||||
| @ -92,20 +93,134 @@ static struct attribute_group rc6_attr_group = { | ||||
| 	.attrs =  rc6_attrs | ||||
| }; | ||||
| 
 | ||||
| static int l3_access_valid(struct drm_device *dev, loff_t offset) | ||||
| { | ||||
| 	if (!IS_IVYBRIDGE(dev)) | ||||
| 		return -EPERM; | ||||
| 
 | ||||
| 	if (offset % 4 != 0) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	if (offset >= GEN7_L3LOG_SIZE) | ||||
| 		return -ENXIO; | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| i915_l3_read(struct file *filp, struct kobject *kobj, | ||||
| 	     struct bin_attribute *attr, char *buf, | ||||
| 	     loff_t offset, size_t count) | ||||
| { | ||||
| 	struct device *dev = container_of(kobj, struct device, kobj); | ||||
| 	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); | ||||
| 	struct drm_device *drm_dev = dminor->dev; | ||||
| 	struct drm_i915_private *dev_priv = drm_dev->dev_private; | ||||
| 	uint32_t misccpctl; | ||||
| 	int i, ret; | ||||
| 
 | ||||
| 	ret = l3_access_valid(drm_dev, offset); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 
 | ||||
| 	ret = i915_mutex_lock_interruptible(drm_dev); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 
 | ||||
| 	misccpctl = I915_READ(GEN7_MISCCPCTL); | ||||
| 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); | ||||
| 
 | ||||
| 	for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4) | ||||
| 		*((uint32_t *)(&buf[i - offset])) = I915_READ(GEN7_L3LOG_BASE + i); | ||||
| 
 | ||||
| 	I915_WRITE(GEN7_MISCCPCTL, misccpctl); | ||||
| 
 | ||||
| 	mutex_unlock(&drm_dev->struct_mutex); | ||||
| 
 | ||||
| 	return i - offset; | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| i915_l3_write(struct file *filp, struct kobject *kobj, | ||||
| 	      struct bin_attribute *attr, char *buf, | ||||
| 	      loff_t offset, size_t count) | ||||
| { | ||||
| 	struct device *dev = container_of(kobj, struct device, kobj); | ||||
| 	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); | ||||
| 	struct drm_device *drm_dev = dminor->dev; | ||||
| 	struct drm_i915_private *dev_priv = drm_dev->dev_private; | ||||
| 	u32 *temp = NULL; /* Just here to make handling failures easy */ | ||||
| 	int ret; | ||||
| 
 | ||||
| 	ret = l3_access_valid(drm_dev, offset); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 
 | ||||
| 	ret = i915_mutex_lock_interruptible(drm_dev); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 
 | ||||
| 	if (!dev_priv->mm.l3_remap_info) { | ||||
| 		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL); | ||||
| 		if (!temp) { | ||||
| 			mutex_unlock(&drm_dev->struct_mutex); | ||||
| 			return -ENOMEM; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	ret = i915_gpu_idle(drm_dev); | ||||
| 	if (ret) { | ||||
| 		kfree(temp); | ||||
| 		mutex_unlock(&drm_dev->struct_mutex); | ||||
| 		return ret; | ||||
| 	} | ||||
| 
 | ||||
| 	/* TODO: Ideally we really want a GPU reset here to make sure errors
 | ||||
| 	 * aren't propagated. Since I cannot find a stable way to reset the GPU | ||||
| 	 * at this point it is left as a TODO. | ||||
| 	*/ | ||||
| 	if (temp) | ||||
| 		dev_priv->mm.l3_remap_info = temp; | ||||
| 
 | ||||
| 	memcpy(dev_priv->mm.l3_remap_info + (offset/4), | ||||
| 	       buf, | ||||
| 	       count); | ||||
| 
 | ||||
| 	i915_gem_l3_remap(drm_dev); | ||||
| 
 | ||||
| 	mutex_unlock(&drm_dev->struct_mutex); | ||||
| 
 | ||||
| 	return count; | ||||
| } | ||||
| 
 | ||||
| static struct bin_attribute dpf_attrs = { | ||||
| 	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)}, | ||||
| 	.size = GEN7_L3LOG_SIZE, | ||||
| 	.read = i915_l3_read, | ||||
| 	.write = i915_l3_write, | ||||
| 	.mmap = NULL | ||||
| }; | ||||
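/*
 * Editor's sketch (not in this commit): reading the l3_parity blob from
 * userspace. The sysfs path is an assumption -- the bin file is created on
 * the DRM minor's device, so the minor number under /sys/class/drm varies
 * per system.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical path for DRM minor 0 */
	FILE *f = fopen("/sys/class/drm/card0/l3_parity", "rb");
	uint32_t row[0x80 / 4];	/* GEN7_L3LOG_SIZE bytes of remap words */
	size_t n;

	if (!f)
		return 1;
	n = fread(row, sizeof(row[0]), sizeof(row) / sizeof(row[0]), f);
	printf("read %zu remap words, first = 0x%08x\n", n, n ? row[0] : 0);
	fclose(f);
	return 0;
}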
| 
 | ||||
| void i915_setup_sysfs(struct drm_device *dev) | ||||
| { | ||||
| 	int ret; | ||||
| 
 | ||||
| 	/* ILK doesn't have any residency information */ | ||||
| 	if (INTEL_INFO(dev)->gen < 6) | ||||
| 		return; | ||||
| 	if (INTEL_INFO(dev)->gen >= 6) { | ||||
| 		ret = sysfs_merge_group(&dev->primary->kdev.kobj, | ||||
| 					&rc6_attr_group); | ||||
| 		if (ret) | ||||
| 			DRM_ERROR("RC6 residency sysfs setup failed\n"); | ||||
| 	} | ||||
| 
 | ||||
| 	ret = sysfs_merge_group(&dev->primary->kdev.kobj, &rc6_attr_group); | ||||
| 	if (ret) | ||||
| 		DRM_ERROR("sysfs setup failed\n"); | ||||
| 	if (IS_IVYBRIDGE(dev)) { | ||||
| 		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs); | ||||
| 		if (ret) | ||||
| 			DRM_ERROR("l3 parity sysfs setup failed\n"); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| void i915_teardown_sysfs(struct drm_device *dev) | ||||
| { | ||||
| 	device_remove_bin_file(&dev->primary->kdev,  &dpf_attrs); | ||||
| 	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); | ||||
| } | ||||
|  | ||||
| @ -311,9 +311,33 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, | ||||
| 	    TP_ARGS(ring, seqno) | ||||
| ); | ||||
| 
 | ||||
| DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin, | ||||
| TRACE_EVENT(i915_gem_request_wait_begin, | ||||
| 	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | ||||
| 	    TP_ARGS(ring, seqno) | ||||
| 	    TP_ARGS(ring, seqno), | ||||
| 
 | ||||
| 	    TP_STRUCT__entry( | ||||
| 			     __field(u32, dev) | ||||
| 			     __field(u32, ring) | ||||
| 			     __field(u32, seqno) | ||||
| 			     __field(bool, blocking) | ||||
| 			     ), | ||||
| 
 | ||||
| 	    /* NB: the blocking information is racy since mutex_is_locked
 | ||||
| 	     * doesn't check that the current thread holds the lock. The only | ||||
| 	     * other option would be to pass the boolean information of whether | ||||
| 	     * or not the caller was blocking down through the stack, which is | ||||
| 	     * less desirable. | ||||
| 	     */ | ||||
| 	    TP_fast_assign( | ||||
| 			   __entry->dev = ring->dev->primary->index; | ||||
| 			   __entry->ring = ring->id; | ||||
| 			   __entry->seqno = seqno; | ||||
| 			   __entry->blocking = mutex_is_locked(&ring->dev->struct_mutex); | ||||
| 			   ), | ||||
| 
 | ||||
| 	    TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s", | ||||
| 		      __entry->dev, __entry->ring, __entry->seqno, | ||||
| 		      __entry->blocking ?  "yes (NB)" : "no") | ||||
| ); | ||||
| 
 | ||||
| DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, | ||||
|  | ||||
| @ -692,7 +692,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = { | ||||
|  * | ||||
|  * Returns 0 on success, nonzero on failure. | ||||
|  */ | ||||
| bool | ||||
| int | ||||
| intel_parse_bios(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
|  | ||||
| @ -476,7 +476,7 @@ struct bdb_edp { | ||||
| } __attribute__ ((packed)); | ||||
| 
 | ||||
| void intel_setup_bios(struct drm_device *dev); | ||||
| bool intel_parse_bios(struct drm_device *dev); | ||||
| int intel_parse_bios(struct drm_device *dev); | ||||
| 
 | ||||
| /*
 | ||||
|  * Driver<->VBIOS interaction occurs through scratch bits in | ||||
|  | ||||
| @ -88,6 +88,9 @@ static void gmch_crt_dpms(struct drm_encoder *encoder, int mode) | ||||
| 	temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); | ||||
| 	temp &= ~ADPA_DAC_ENABLE; | ||||
| 
 | ||||
| 	if (IS_VALLEYVIEW(dev) && mode != DRM_MODE_DPMS_ON) | ||||
| 		mode = DRM_MODE_DPMS_OFF; | ||||
| 
 | ||||
| 	switch (mode) { | ||||
| 	case DRM_MODE_DPMS_ON: | ||||
| 		temp |= ADPA_DAC_ENABLE; | ||||
| @ -129,7 +132,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector, | ||||
| } | ||||
| 
 | ||||
| static bool intel_crt_mode_fixup(struct drm_encoder *encoder, | ||||
| 				 struct drm_display_mode *mode, | ||||
| 				 const struct drm_display_mode *mode, | ||||
| 				 struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	return true; | ||||
| @ -230,6 +233,42 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) | ||||
| { | ||||
| 	struct drm_device *dev = connector->dev; | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	u32 adpa; | ||||
| 	bool ret; | ||||
| 	u32 save_adpa; | ||||
| 
 | ||||
| 	save_adpa = adpa = I915_READ(ADPA); | ||||
| 	DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); | ||||
| 
 | ||||
| 	adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; | ||||
| 
 | ||||
| 	I915_WRITE(ADPA, adpa); | ||||
| 
 | ||||
| 	if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, | ||||
| 		     1000)) { | ||||
| 		DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); | ||||
| 		I915_WRITE(ADPA, save_adpa); | ||||
| 	} | ||||
| 
 | ||||
| 	/* Check the status to see if both blue and green are on now */ | ||||
| 	adpa = I915_READ(ADPA); | ||||
| 	if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) | ||||
| 		ret = true; | ||||
| 	else | ||||
| 		ret = false; | ||||
| 
 | ||||
| 	DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); | ||||
| 
 | ||||
| 	/* FIXME: debug force function and remove */ | ||||
| 	ret = true; | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence. | ||||
|  * | ||||
| @ -249,6 +288,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | ||||
| 	if (HAS_PCH_SPLIT(dev)) | ||||
| 		return intel_ironlake_crt_detect_hotplug(connector); | ||||
| 
 | ||||
| 	if (IS_VALLEYVIEW(dev)) | ||||
| 		return valleyview_crt_detect_hotplug(connector); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * On 4 series desktop, CRT detect sequence need to be done twice | ||||
| 	 * to get a reliable result. | ||||
| @ -288,39 +330,34 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) | ||||
| { | ||||
| 	struct intel_crt *crt = intel_attached_crt(connector); | ||||
| 	struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; | ||||
| 	struct edid *edid; | ||||
| 	struct i2c_adapter *i2c; | ||||
| 
 | ||||
| 	/* CRT should always be at 0, but check anyway */ | ||||
| 	if (crt->base.type != INTEL_OUTPUT_ANALOG) | ||||
| 		return false; | ||||
| 	BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); | ||||
| 
 | ||||
| 	if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) { | ||||
| 		struct edid *edid; | ||||
| 		bool is_digital = false; | ||||
| 		struct i2c_adapter *i2c; | ||||
| 	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); | ||||
| 	edid = drm_get_edid(connector, i2c); | ||||
| 
 | ||||
| 	if (edid) { | ||||
| 		bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; | ||||
| 
 | ||||
| 		i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); | ||||
| 		edid = drm_get_edid(connector, i2c); | ||||
| 		/*
 | ||||
| 		 * This may be a DVI-I connector with a shared DDC | ||||
| 		 * link between analog and digital outputs, so we | ||||
| 		 * have to check the EDID input spec of the attached device. | ||||
| 		 * | ||||
| 		 * On the other hand, what should we do if it is a broken EDID? | ||||
| 		 */ | ||||
| 		if (edid != NULL) { | ||||
| 			is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; | ||||
| 			connector->display_info.raw_edid = NULL; | ||||
| 			kfree(edid); | ||||
| 		} | ||||
| 
 | ||||
| 		if (!is_digital) { | ||||
| 			DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); | ||||
| 			return true; | ||||
| 		} else { | ||||
| 			DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); | ||||
| 		} | ||||
| 
 | ||||
| 		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); | ||||
| 	} else { | ||||
| 		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n"); | ||||
| 	} | ||||
| 
 | ||||
| 	kfree(edid); | ||||
| 
 | ||||
| 	return false; | ||||
| } | ||||
| 
 | ||||
| @ -453,18 +490,27 @@ intel_crt_detect(struct drm_connector *connector, bool force) | ||||
| 	struct intel_load_detect_pipe tmp; | ||||
| 
 | ||||
| 	if (I915_HAS_HOTPLUG(dev)) { | ||||
| 		/* We cannot rely on the HPD pin always being correctly wired
 | ||||
| 		 * up; for example, many KVMs do not pass it through, and so | ||||
| 		 * only trust an assertion that the monitor is connected. | ||||
| 		 */ | ||||
| 		if (intel_crt_detect_hotplug(connector)) { | ||||
| 			DRM_DEBUG_KMS("CRT detected via hotplug\n"); | ||||
| 			return connector_status_connected; | ||||
| 		} else { | ||||
| 		} else | ||||
| 			DRM_DEBUG_KMS("CRT not detected via hotplug\n"); | ||||
| 			return connector_status_disconnected; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if (intel_crt_detect_ddc(connector)) | ||||
| 		return connector_status_connected; | ||||
| 
 | ||||
| 	/* Load detection is broken on HPD capable machines. Whoever wants a
 | ||||
| 	 * broken monitor (without EDID) to work behind a broken KVM (that fails | ||||
| 	 * to have the right resistors for HP detection) needs to fix this up. | ||||
| 	 * For now just bail out. */ | ||||
| 	if (I915_HAS_HOTPLUG(dev)) | ||||
| 		return connector_status_disconnected; | ||||
| 
 | ||||
| 	if (!force) | ||||
| 		return connector->status; | ||||
| 
 | ||||
|  | ||||
| @ -170,6 +170,15 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) | ||||
| 
 | ||||
| 		udelay(600); | ||||
| 
 | ||||
| 		/* We need to program FDI_RX_MISC with the default TP1 to TP2
 | ||||
| 		 * values before enabling the receiver, and configure the delay | ||||
| 		 * for the FDI timing generator to 90h. Luckily, all the other | ||||
| 		 * bits are supposed to be zeroed, so we can write those values | ||||
| 		 * directly. | ||||
| 		 */ | ||||
| 		I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 | | ||||
| 				FDI_RX_FDI_DELAY_90); | ||||
| 
 | ||||
| 		/* Enable CPU FDI Receiver with auto-training */ | ||||
| 		reg = FDI_RX_CTL(pipe); | ||||
| 		I915_WRITE(reg, | ||||
| @ -726,8 +735,7 @@ void intel_ddi_mode_set(struct drm_encoder *encoder, | ||||
| 
 | ||||
| 	I915_WRITE(DDI_FUNC_CTL(pipe), temp); | ||||
| 
 | ||||
| 	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); | ||||
| 	intel_hdmi_set_spd_infoframe(encoder); | ||||
| 	intel_hdmi->set_infoframes(encoder, adjusted_mode); | ||||
| } | ||||
| 
 | ||||
| void intel_ddi_dpms(struct drm_encoder *encoder, int mode) | ||||
|  | ||||
| @ -98,6 +98,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, | ||||
| 			   int target, int refclk, intel_clock_t *match_clock, | ||||
| 			   intel_clock_t *best_clock); | ||||
| 
 | ||||
| static bool | ||||
| intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, | ||||
| 			int target, int refclk, intel_clock_t *match_clock, | ||||
| 			intel_clock_t *best_clock); | ||||
| 
 | ||||
| static inline u32 /* units of 100MHz */ | ||||
| intel_fdi_link_freq(struct drm_device *dev) | ||||
| { | ||||
| @ -359,6 +364,48 @@ static const intel_limit_t intel_limits_ironlake_display_port = { | ||||
| 	.find_pll = intel_find_pll_ironlake_dp, | ||||
| }; | ||||
| 
 | ||||
| static const intel_limit_t intel_limits_vlv_dac = { | ||||
| 	.dot = { .min = 25000, .max = 270000 }, | ||||
| 	.vco = { .min = 4000000, .max = 6000000 }, | ||||
| 	.n = { .min = 1, .max = 7 }, | ||||
| 	.m = { .min = 22, .max = 450 }, /* guess */ | ||||
| 	.m1 = { .min = 2, .max = 3 }, | ||||
| 	.m2 = { .min = 11, .max = 156 }, | ||||
| 	.p = { .min = 10, .max = 30 }, | ||||
| 	.p1 = { .min = 2, .max = 3 }, | ||||
| 	.p2 = { .dot_limit = 270000, | ||||
| 		.p2_slow = 2, .p2_fast = 20 }, | ||||
| 	.find_pll = intel_vlv_find_best_pll, | ||||
| }; | ||||
| 
 | ||||
| static const intel_limit_t intel_limits_vlv_hdmi = { | ||||
| 	.dot = { .min = 20000, .max = 165000 }, | ||||
| 	.vco = { .min = 4000000, .max = 5994000 }, | ||||
| 	.n = { .min = 1, .max = 7 }, | ||||
| 	.m = { .min = 60, .max = 300 }, /* guess */ | ||||
| 	.m1 = { .min = 2, .max = 3 }, | ||||
| 	.m2 = { .min = 11, .max = 156 }, | ||||
| 	.p = { .min = 10, .max = 30 }, | ||||
| 	.p1 = { .min = 2, .max = 3 }, | ||||
| 	.p2 = { .dot_limit = 270000, | ||||
| 		.p2_slow = 2, .p2_fast = 20 }, | ||||
| 	.find_pll = intel_vlv_find_best_pll, | ||||
| }; | ||||
| 
 | ||||
| static const intel_limit_t intel_limits_vlv_dp = { | ||||
| 	.dot = { .min = 162000, .max = 270000 }, | ||||
| 	.vco = { .min = 4000000, .max = 5994000 }, | ||||
| 	.n = { .min = 1, .max = 7 }, | ||||
| 	.m = { .min = 60, .max = 300 }, /* guess */ | ||||
| 	.m1 = { .min = 2, .max = 3 }, | ||||
| 	.m2 = { .min = 11, .max = 156 }, | ||||
| 	.p = { .min = 10, .max = 30 }, | ||||
| 	.p1 = { .min = 2, .max = 3 }, | ||||
| 	.p2 = { .dot_limit = 270000, | ||||
| 		.p2_slow = 2, .p2_fast = 20 }, | ||||
| 	.find_pll = intel_vlv_find_best_pll, | ||||
| }; | ||||
| 
 | ||||
| u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg) | ||||
| { | ||||
| 	unsigned long flags; | ||||
| @ -384,6 +431,28 @@ out_unlock: | ||||
| 	return val; | ||||
| } | ||||
| 
 | ||||
| static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg, | ||||
| 			     u32 val) | ||||
| { | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&dev_priv->dpio_lock, flags); | ||||
| 	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { | ||||
| 		DRM_ERROR("DPIO idle wait timed out\n"); | ||||
| 		goto out_unlock; | ||||
| 	} | ||||
| 
 | ||||
| 	I915_WRITE(DPIO_DATA, val); | ||||
| 	I915_WRITE(DPIO_REG, reg); | ||||
| 	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID | | ||||
| 		   DPIO_BYTE); | ||||
| 	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) | ||||
| 		DRM_ERROR("DPIO write wait timed out\n"); | ||||
| 
 | ||||
| out_unlock: | ||||
| 	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags); | ||||
| } | ||||
| 
 | ||||
| static void vlv_init_dpio(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| @ -434,7 +503,7 @@ static bool is_dual_link_lvds(struct drm_i915_private *dev_priv, | ||||
| 		 * register is uninitialized. | ||||
| 		 */ | ||||
| 		val = I915_READ(reg); | ||||
| 		if (!(val & ~LVDS_DETECTED)) | ||||
| 		if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED))) | ||||
| 			val = dev_priv->bios_lvds_val; | ||||
| 		dev_priv->lvds_val = val; | ||||
| 	} | ||||
| @ -510,6 +579,13 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) | ||||
| 			limit = &intel_limits_pineview_lvds; | ||||
| 		else | ||||
| 			limit = &intel_limits_pineview_sdvo; | ||||
| 	} else if (IS_VALLEYVIEW(dev)) { | ||||
| 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) | ||||
| 			limit = &intel_limits_vlv_dac; | ||||
| 		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) | ||||
| 			limit = &intel_limits_vlv_hdmi; | ||||
| 		else | ||||
| 			limit = &intel_limits_vlv_dp; | ||||
| 	} else if (!IS_GEN2(dev)) { | ||||
| 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | ||||
| 			limit = &intel_limits_i9xx_lvds; | ||||
| @ -551,11 +627,10 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock | ||||
| bool intel_pipe_has_type(struct drm_crtc *crtc, int type) | ||||
| { | ||||
| 	struct drm_device *dev = crtc->dev; | ||||
| 	struct drm_mode_config *mode_config = &dev->mode_config; | ||||
| 	struct intel_encoder *encoder; | ||||
| 
 | ||||
| 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) | ||||
| 		if (encoder->base.crtc == crtc && encoder->type == type) | ||||
| 	for_each_encoder_on_crtc(dev, crtc, encoder) | ||||
| 		if (encoder->type == type) | ||||
| 			return true; | ||||
| 
 | ||||
| 	return false; | ||||
| @ -783,6 +858,73 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | ||||
| 	memcpy(best_clock, &clock, sizeof(intel_clock_t)); | ||||
| 	return true; | ||||
| } | ||||
| static bool | ||||
| intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, | ||||
| 			int target, int refclk, intel_clock_t *match_clock, | ||||
| 			intel_clock_t *best_clock) | ||||
| { | ||||
| 	u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2; | ||||
| 	u32 m, n, fastclk; | ||||
| 	u32 updrate, minupdate, fracbits, p; | ||||
| 	unsigned long bestppm, ppm, absppm; | ||||
| 	int dotclk, flag = 0; | ||||
| 
 | ||||
| 	dotclk = target * 1000; | ||||
| 	bestppm = 1000000; | ||||
| 	ppm = absppm = 0; | ||||
| 	fastclk = dotclk / (2*100); | ||||
| 	updrate = 0; | ||||
| 	minupdate = 19200; | ||||
| 	fracbits = 1; | ||||
| 	n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0; | ||||
| 	bestm1 = bestm2 = bestp1 = bestp2 = 0; | ||||
| 
 | ||||
| 	/* based on hardware requirement, prefer smaller n to precision */ | ||||
| 	for (n = limit->n.min; n <= ((refclk) / minupdate); n++) { | ||||
| 		updrate = refclk / n; | ||||
| 		for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) { | ||||
| 			for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) { | ||||
| 				if (p2 > 10) | ||||
| 					p2 = p2 - 1; | ||||
| 				p = p1 * p2; | ||||
| 				/* based on hardware requirement, prefer bigger m1,m2 values */ | ||||
| 				for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) { | ||||
| 					m2 = (((2*(fastclk * p * n / m1 )) + | ||||
| 					       refclk) / (2*refclk)); | ||||
| 					m = m1 * m2; | ||||
| 					vco = updrate * m; | ||||
| 					if (vco >= limit->vco.min && vco < limit->vco.max) { | ||||
| 						ppm = 1000000 * ((vco / p) - fastclk) / fastclk; | ||||
| 						absppm = (ppm > 0) ? ppm : (-ppm); | ||||
| 						if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) { | ||||
| 							bestppm = 0; | ||||
| 							flag = 1; | ||||
| 						} | ||||
| 						if (absppm < bestppm - 10) { | ||||
| 							bestppm = absppm; | ||||
| 							flag = 1; | ||||
| 						} | ||||
| 						if (flag) { | ||||
| 							bestn = n; | ||||
| 							bestm1 = m1; | ||||
| 							bestm2 = m2; | ||||
| 							bestp1 = p1; | ||||
| 							bestp2 = p2; | ||||
| 							flag = 0; | ||||
| 						} | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	best_clock->n = bestn; | ||||
| 	best_clock->m1 = bestm1; | ||||
| 	best_clock->m2 = bestm2; | ||||
| 	best_clock->p1 = bestp1; | ||||
| 	best_clock->p2 = bestp2; | ||||
| 
 | ||||
| 	return true; | ||||
| } | ||||
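/*
 * Editor's sketch (not part of this commit): the arithmetic
 * intel_vlv_find_best_pll() searches over, in its own quantities:
 * vco = (refclk / n) * m1 * m2 and fastclk = vco / (p1 * p2), where
 * fastclk is the target dot clock in Hz divided by 200. The divider
 * values below were picked by hand to land inside the vlv_dac limits.
 */
#include <stdio.h>

int main(void)
{
	int refclk = 100000;	/* kHz, as vlv_get_refclk() returns */
	int n = 2, m1 = 2, m2 = 50, p1 = 2, p2 = 5;
	long vco = (long)(refclk / n) * m1 * m2;	/* 5,000,000: in range */
	long fastclk = vco / (p1 * p2);			/* 500,000 */
	long dot_khz = fastclk * 200 / 1000;		/* 100,000 kHz = 100 MHz */

	printf("vco %ld, fastclk %ld, dot %ld kHz\n", vco, fastclk, dot_khz);
	return 0;
}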
| 
 | ||||
| static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe) | ||||
| { | ||||
| @ -1232,6 +1374,9 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, | ||||
| 	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), | ||||
| 	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", | ||||
| 	     reg, pipe_name(pipe)); | ||||
| 
 | ||||
| 	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT), | ||||
| 	     "IBX PCH dp port still using transcoder B\n"); | ||||
| } | ||||
| 
 | ||||
| static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, | ||||
| @ -1241,6 +1386,9 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, | ||||
| 	WARN(hdmi_pipe_enabled(dev_priv, val, pipe), | ||||
| 	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", | ||||
| 	     reg, pipe_name(pipe)); | ||||
| 
 | ||||
| 	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT), | ||||
| 	     "IBX PCH hdmi port still using transcoder B\n"); | ||||
| } | ||||
| 
 | ||||
| static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, | ||||
| @ -1287,7 +1435,7 @@ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | ||||
| 	u32 val; | ||||
| 
 | ||||
| 	/* No really, not for ILK+ */ | ||||
| 	BUG_ON(dev_priv->info->gen >= 5); | ||||
| 	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5); | ||||
| 
 | ||||
| 	/* PLL is protected by panel, make sure we can write it */ | ||||
| 	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) | ||||
| @ -1344,7 +1492,7 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value) | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&dev_priv->dpio_lock, flags); | ||||
| 	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0, | ||||
| 	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, | ||||
| 				100)) { | ||||
| 		DRM_ERROR("timeout waiting for SBI to become ready\n"); | ||||
| 		goto out_unlock; | ||||
| @ -1358,7 +1506,7 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value) | ||||
| 			SBI_BUSY | | ||||
| 			SBI_CTL_OP_CRWR); | ||||
| 
 | ||||
| 	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0, | ||||
| 	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, | ||||
| 				100)) { | ||||
| 		DRM_ERROR("timeout waiting for SBI to complete write transaction\n"); | ||||
| 		goto out_unlock; | ||||
| @ -1372,10 +1520,10 @@ static u32 | ||||
| intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg) | ||||
| { | ||||
| 	unsigned long flags; | ||||
| 	u32 value; | ||||
| 	u32 value = 0; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&dev_priv->dpio_lock, flags); | ||||
| 	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0, | ||||
| 	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, | ||||
| 				100)) { | ||||
| 		DRM_ERROR("timeout waiting for SBI to become ready\n"); | ||||
| 		goto out_unlock; | ||||
| @ -1387,7 +1535,7 @@ intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg) | ||||
| 			SBI_BUSY | | ||||
| 			SBI_CTL_OP_CRRD); | ||||
| 
 | ||||
| 	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0, | ||||
| 	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, | ||||
| 				100)) { | ||||
| 		DRM_ERROR("timeout waiting for SBI to complete read transaction\n"); | ||||
| 		goto out_unlock; | ||||
| @ -1824,6 +1972,22 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) | ||||
| 	i915_gem_object_unpin(obj); | ||||
| } | ||||
| 
 | ||||
| /* Computes the linear offset to the base tile and adjusts x, y. Bytes per pixel
 | ||||
|  * is assumed to be a power-of-two. */ | ||||
| static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y, | ||||
| 							unsigned int bpp, | ||||
| 							unsigned int pitch) | ||||
| { | ||||
| 	int tile_rows, tiles; | ||||
| 
 | ||||
| 	tile_rows = *y / 8; | ||||
| 	*y %= 8; | ||||
| 	tiles = *x / (512/bpp); | ||||
| 	*x %= 512/bpp; | ||||
| 
 | ||||
| 	return tile_rows * pitch * 8 + tiles * 4096; | ||||
| } | ||||
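/*
 * Editor's sketch (not part of this commit): exercising the X-tile offset
 * computation above. An X tile is 512 bytes wide by 8 rows (4096 bytes),
 * so for a 4-byte-per-pixel surface with an 8192-byte stride, (x=300, y=35)
 * lands in tile row 4, tile column 2, leaving a (44, 3) intra-tile
 * adjustment and a base offset of 4*8192*8 + 2*4096 = 270336 bytes.
 */
#include <stdio.h>

static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y,
							unsigned int bpp,
							unsigned int pitch)
{
	int tile_rows, tiles;

	tile_rows = *y / 8;		/* whole tile rows above the pixel */
	*y %= 8;
	tiles = *x / (512 / bpp);	/* whole tiles left of the pixel */
	*x %= 512 / bpp;

	return tile_rows * pitch * 8 + tiles * 4096;
}

int main(void)
{
	int x = 300, y = 35;
	unsigned long off = gen4_compute_dspaddr_offset_xtiled(&x, &y, 4, 8192);

	printf("base tile offset %lu, adjusted x=%d y=%d\n", off, x, y);
	return 0;
}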
| 
 | ||||
| static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||||
| 			     int x, int y) | ||||
| { | ||||
| @ -1833,7 +1997,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||||
| 	struct intel_framebuffer *intel_fb; | ||||
| 	struct drm_i915_gem_object *obj; | ||||
| 	int plane = intel_crtc->plane; | ||||
| 	unsigned long Start, Offset; | ||||
| 	unsigned long linear_offset; | ||||
| 	u32 dspcntr; | ||||
| 	u32 reg; | ||||
| 
 | ||||
| @ -1880,18 +2044,28 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||||
| 
 | ||||
| 	I915_WRITE(reg, dspcntr); | ||||
| 
 | ||||
| 	Start = obj->gtt_offset; | ||||
| 	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); | ||||
| 	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); | ||||
| 
 | ||||
| 	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", | ||||
| 		      Start, Offset, x, y, fb->pitches[0]); | ||||
| 	if (INTEL_INFO(dev)->gen >= 4) { | ||||
| 		intel_crtc->dspaddr_offset = | ||||
| 			gen4_compute_dspaddr_offset_xtiled(&x, &y, | ||||
| 							   fb->bits_per_pixel / 8, | ||||
| 							   fb->pitches[0]); | ||||
| 		linear_offset -= intel_crtc->dspaddr_offset; | ||||
| 	} else { | ||||
| 		intel_crtc->dspaddr_offset = linear_offset; | ||||
| 	} | ||||
| 
 | ||||
| 	DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", | ||||
| 		      obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); | ||||
| 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); | ||||
| 	if (INTEL_INFO(dev)->gen >= 4) { | ||||
| 		I915_MODIFY_DISPBASE(DSPSURF(plane), Start); | ||||
| 		I915_MODIFY_DISPBASE(DSPSURF(plane), | ||||
| 				     obj->gtt_offset + intel_crtc->dspaddr_offset); | ||||
| 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); | ||||
| 		I915_WRITE(DSPADDR(plane), Offset); | ||||
| 		I915_WRITE(DSPLINOFF(plane), linear_offset); | ||||
| 	} else | ||||
| 		I915_WRITE(DSPADDR(plane), Start + Offset); | ||||
| 		I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset); | ||||
| 	POSTING_READ(reg); | ||||
| 
 | ||||
| 	return 0; | ||||
| @ -1906,7 +2080,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc, | ||||
| 	struct intel_framebuffer *intel_fb; | ||||
| 	struct drm_i915_gem_object *obj; | ||||
| 	int plane = intel_crtc->plane; | ||||
| 	unsigned long Start, Offset; | ||||
| 	unsigned long linear_offset; | ||||
| 	u32 dspcntr; | ||||
| 	u32 reg; | ||||
| 
 | ||||
| @ -1961,15 +2135,20 @@ static int ironlake_update_plane(struct drm_crtc *crtc, | ||||
| 
 | ||||
| 	I915_WRITE(reg, dspcntr); | ||||
| 
 | ||||
| 	Start = obj->gtt_offset; | ||||
| 	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); | ||||
| 	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); | ||||
| 	intel_crtc->dspaddr_offset = | ||||
| 		gen4_compute_dspaddr_offset_xtiled(&x, &y, | ||||
| 						   fb->bits_per_pixel / 8, | ||||
| 						   fb->pitches[0]); | ||||
| 	linear_offset -= intel_crtc->dspaddr_offset; | ||||
| 
 | ||||
| 	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", | ||||
| 		      Start, Offset, x, y, fb->pitches[0]); | ||||
| 	DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", | ||||
| 		      obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); | ||||
| 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); | ||||
| 	I915_MODIFY_DISPBASE(DSPSURF(plane), Start); | ||||
| 	I915_MODIFY_DISPBASE(DSPSURF(plane), | ||||
| 			     obj->gtt_offset + intel_crtc->dspaddr_offset); | ||||
| 	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); | ||||
| 	I915_WRITE(DSPADDR(plane), Offset); | ||||
| 	I915_WRITE(DSPLINOFF(plane), linear_offset); | ||||
| 	POSTING_READ(reg); | ||||
| 
 | ||||
| 	return 0; | ||||
| @ -2656,16 +2835,13 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) | ||||
| static bool intel_crtc_driving_pch(struct drm_crtc *crtc) | ||||
| { | ||||
| 	struct drm_device *dev = crtc->dev; | ||||
| 	struct drm_mode_config *mode_config = &dev->mode_config; | ||||
| 	struct intel_encoder *encoder; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that | ||||
| 	 * must be driven by its own crtc; no sharing is possible. | ||||
| 	 */ | ||||
| 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | ||||
| 		if (encoder->base.crtc != crtc) | ||||
| 			continue; | ||||
| 	for_each_encoder_on_crtc(dev, crtc, encoder) { | ||||
| 
 | ||||
| 		/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
 | ||||
| 		 * CPU handles all others */ | ||||
| @ -3397,7 +3573,7 @@ void intel_encoder_destroy(struct drm_encoder *encoder) | ||||
| } | ||||
| 
 | ||||
| static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, | ||||
| 				  struct drm_display_mode *mode, | ||||
| 				  const struct drm_display_mode *mode, | ||||
| 				  struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	struct drm_device *dev = crtc->dev; | ||||
| @ -3554,16 +3730,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | ||||
| { | ||||
| 	struct drm_device *dev = crtc->dev; | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	struct drm_encoder *encoder; | ||||
| 	struct drm_connector *connector; | ||||
| 	struct intel_encoder *intel_encoder; | ||||
| 	unsigned int display_bpc = UINT_MAX, bpc; | ||||
| 
 | ||||
| 	/* Walk the encoders & connectors on this crtc, get min bpc */ | ||||
| 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||||
| 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder); | ||||
| 
 | ||||
| 		if (encoder->crtc != crtc) | ||||
| 			continue; | ||||
| 	for_each_encoder_on_crtc(dev, crtc, intel_encoder) { | ||||
| 
 | ||||
| 		if (intel_encoder->type == INTEL_OUTPUT_LVDS) { | ||||
| 			unsigned int lvds_bpc; | ||||
| @ -3595,7 +3767,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | ||||
| 		/* Not one of the known troublemakers, check the EDID */ | ||||
| 		list_for_each_entry(connector, &dev->mode_config.connector_list, | ||||
| 				    head) { | ||||
| 			if (connector->encoder != encoder) | ||||
| 			if (connector->encoder != &intel_encoder->base) | ||||
| 				continue; | ||||
| 
 | ||||
| 			/* Don't use an invalid EDID bpc value */ | ||||
| @ -3666,13 +3838,37 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | ||||
| 	return display_bpc != bpc; | ||||
| } | ||||
| 
 | ||||
| static int vlv_get_refclk(struct drm_crtc *crtc) | ||||
| { | ||||
| 	struct drm_device *dev = crtc->dev; | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	int refclk = 27000; /* for DP & HDMI */ | ||||
| 
 | ||||
| 	return 100000; /* only one validated so far */ | ||||
| 
 | ||||
| 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { | ||||
| 		refclk = 96000; | ||||
| 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||||
| 		if (intel_panel_use_ssc(dev_priv)) | ||||
| 			refclk = 100000; | ||||
| 		else | ||||
| 			refclk = 96000; | ||||
| 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { | ||||
| 		refclk = 100000; | ||||
| 	} | ||||
| 
 | ||||
| 	return refclk; | ||||
| } | ||||
| 
 | ||||
| static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) | ||||
| { | ||||
| 	struct drm_device *dev = crtc->dev; | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	int refclk; | ||||
| 
 | ||||
| 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && | ||||
| 	if (IS_VALLEYVIEW(dev)) { | ||||
| 		refclk = vlv_get_refclk(crtc); | ||||
| 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && | ||||
| 	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) { | ||||
| 		refclk = dev_priv->lvds_ssc_freq * 1000; | ||||
| 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | ||||
| @ -3787,6 +3983,72 @@ static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock, | ||||
| 	I915_WRITE(LVDS, temp); | ||||
| } | ||||
| 
 | ||||
| static void vlv_update_pll(struct drm_crtc *crtc, | ||||
| 			   struct drm_display_mode *mode, | ||||
| 			   struct drm_display_mode *adjusted_mode, | ||||
| 			   intel_clock_t *clock, intel_clock_t *reduced_clock, | ||||
| 			   int refclk, int num_connectors) | ||||
| { | ||||
| 	struct drm_device *dev = crtc->dev; | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||||
| 	int pipe = intel_crtc->pipe; | ||||
| 	u32 dpll, mdiv, pdiv; | ||||
| 	u32 bestn, bestm1, bestm2, bestp1, bestp2; | ||||
| 	bool is_hdmi; | ||||
| 
 | ||||
| 	is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); | ||||
| 
 | ||||
| 	bestn = clock->n; | ||||
| 	bestm1 = clock->m1; | ||||
| 	bestm2 = clock->m2; | ||||
| 	bestp1 = clock->p1; | ||||
| 	bestp2 = clock->p2; | ||||
| 
 | ||||
| 	/* Enable DPIO clock input */ | ||||
| 	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | | ||||
| 		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; | ||||
| 	I915_WRITE(DPLL(pipe), dpll); | ||||
| 	POSTING_READ(DPLL(pipe)); | ||||
| 
 | ||||
| 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); | ||||
| 	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); | ||||
| 	mdiv |= (bestn << DPIO_N_SHIFT); | ||||
| 	mdiv |= (1 << DPIO_POST_DIV_SHIFT); | ||||
| 	mdiv |= (1 << DPIO_K_SHIFT); | ||||
| 	mdiv |= DPIO_ENABLE_CALIBRATION; | ||||
| 	intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv); | ||||
| 
 | ||||
| 	intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000); | ||||
| 
 | ||||
| 	pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) | | ||||
| 		(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) | | ||||
| 		(8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT); | ||||
| 	intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv); | ||||
| 
 | ||||
| 	intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051); | ||||
| 
 | ||||
| 	dpll |= DPLL_VCO_ENABLE; | ||||
| 	I915_WRITE(DPLL(pipe), dpll); | ||||
| 	POSTING_READ(DPLL(pipe)); | ||||
| 	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) | ||||
| 		DRM_ERROR("DPLL %d failed to lock\n", pipe); | ||||
| 
 | ||||
| 	if (is_hdmi) { | ||||
| 		u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode); | ||||
| 
 | ||||
| 		if (temp > 1) | ||||
| 			temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; | ||||
| 		else | ||||
| 			temp = 0; | ||||
| 
 | ||||
| 		I915_WRITE(DPLL_MD(pipe), temp); | ||||
| 		POSTING_READ(DPLL_MD(pipe)); | ||||
| 	} | ||||
| 
 | ||||
| 	intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */ | ||||
| } | ||||
| 
 | ||||
| static void i9xx_update_pll(struct drm_crtc *crtc, | ||||
| 			    struct drm_display_mode *mode, | ||||
| 			    struct drm_display_mode *adjusted_mode, | ||||
| @ -3974,15 +4236,11 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | ||||
| 	u32 dspcntr, pipeconf, vsyncshift; | ||||
| 	bool ok, has_reduced_clock = false, is_sdvo = false; | ||||
| 	bool is_lvds = false, is_tv = false, is_dp = false; | ||||
| 	struct drm_mode_config *mode_config = &dev->mode_config; | ||||
| 	struct intel_encoder *encoder; | ||||
| 	const intel_limit_t *limit; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | ||||
| 		if (encoder->base.crtc != crtc) | ||||
| 			continue; | ||||
| 
 | ||||
| 	for_each_encoder_on_crtc(dev, crtc, encoder) { | ||||
| 		switch (encoder->type) { | ||||
| 		case INTEL_OUTPUT_LVDS: | ||||
| 			is_lvds = true; | ||||
| @ -4044,6 +4302,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | ||||
| 
 | ||||
| 	if (IS_GEN2(dev)) | ||||
| 		i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors); | ||||
| 	else if (IS_VALLEYVIEW(dev)) | ||||
| 		vlv_update_pll(crtc, mode, adjusted_mode, &clock, NULL, | ||||
| 			       refclk, num_connectors); | ||||
| 	else | ||||
| 		i9xx_update_pll(crtc, mode, adjusted_mode, &clock, | ||||
| 				has_reduced_clock ? &reduced_clock : NULL, | ||||
| @ -4282,15 +4543,11 @@ static int ironlake_get_refclk(struct drm_crtc *crtc) | ||||
| 	struct drm_device *dev = crtc->dev; | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	struct intel_encoder *encoder; | ||||
| 	struct drm_mode_config *mode_config = &dev->mode_config; | ||||
| 	struct intel_encoder *edp_encoder = NULL; | ||||
| 	int num_connectors = 0; | ||||
| 	bool is_lvds = false; | ||||
| 
 | ||||
| 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | ||||
| 		if (encoder->base.crtc != crtc) | ||||
| 			continue; | ||||
| 
 | ||||
| 	for_each_encoder_on_crtc(dev, crtc, encoder) { | ||||
| 		switch (encoder->type) { | ||||
| 		case INTEL_OUTPUT_LVDS: | ||||
| 			is_lvds = true; | ||||
| @ -4327,7 +4584,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | ||||
| 	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; | ||||
| 	bool ok, has_reduced_clock = false, is_sdvo = false; | ||||
| 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; | ||||
| 	struct drm_mode_config *mode_config = &dev->mode_config; | ||||
| 	struct intel_encoder *encoder, *edp_encoder = NULL; | ||||
| 	const intel_limit_t *limit; | ||||
| 	int ret; | ||||
| @ -4338,10 +4594,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | ||||
| 	bool dither; | ||||
| 	bool is_cpu_edp = false, is_pch_edp = false; | ||||
| 
 | ||||
| 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | ||||
| 		if (encoder->base.crtc != crtc) | ||||
| 			continue; | ||||
| 
 | ||||
| 	for_each_encoder_on_crtc(dev, crtc, encoder) { | ||||
| 		switch (encoder->type) { | ||||
| 		case INTEL_OUTPUT_LVDS: | ||||
| 			is_lvds = true; | ||||
| @ -4405,25 +4658,10 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | ||||
| 						    &clock, | ||||
| 						    &reduced_clock); | ||||
| 	} | ||||
| 	/* SDVO TV has fixed PLL values that depend on its clock range; | ||||
| 	   this mirrors the VBIOS setting. */ | ||||
| 	if (is_sdvo && is_tv) { | ||||
| 		if (adjusted_mode->clock >= 100000 | ||||
| 		    && adjusted_mode->clock < 140500) { | ||||
| 			clock.p1 = 2; | ||||
| 			clock.p2 = 10; | ||||
| 			clock.n = 3; | ||||
| 			clock.m1 = 16; | ||||
| 			clock.m2 = 8; | ||||
| 		} else if (adjusted_mode->clock >= 140500 | ||||
| 			   && adjusted_mode->clock <= 200000) { | ||||
| 			clock.p1 = 1; | ||||
| 			clock.p2 = 10; | ||||
| 			clock.n = 6; | ||||
| 			clock.m1 = 12; | ||||
| 			clock.m2 = 8; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if (is_sdvo && is_tv) | ||||
| 		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock); | ||||
| 
 | ||||
| 
 | ||||
| 	/* FDI link */ | ||||
| 	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); | ||||
| @ -4431,16 +4669,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | ||||
| 	/* CPU eDP doesn't require FDI link, so just set DP M/N | ||||
| 	   according to current link config */ | ||||
| 	if (is_cpu_edp) { | ||||
| 		target_clock = mode->clock; | ||||
| 		intel_edp_link_config(edp_encoder, &lane, &link_bw); | ||||
| 	} else { | ||||
| 		/* [e]DP over FDI requires target mode clock | ||||
| 		   instead of link clock */ | ||||
| 		if (is_dp) | ||||
| 			target_clock = mode->clock; | ||||
| 		else | ||||
| 			target_clock = adjusted_mode->clock; | ||||
| 
 | ||||
| 		/* FDI is a binary signal running at ~2.7GHz, encoding | ||||
| 		 * each output octet as 10 bits. The actual frequency | ||||
| 		 * is stored as a divider into a 100MHz clock, and the | ||||
| @ -4451,6 +4681,14 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | ||||
| 		link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; | ||||
| 	} | ||||
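The FDI comment above packs the whole bandwidth derivation into one expression; worked through once, assuming intel_fdi_link_freq() reports the link as a divider of a 100 MHz clock (27 for a 2.7 GHz link is an illustrative value, not taken from this diff):

	/*
	 * link_bw = intel_fdi_link_freq(dev) * MHz(100) / KHz(1) / 10
	 *         = 27 * 100,000,000 / 1,000 / 10
	 *         = 270,000 kHz of payload byte clock,
	 * i.e. a 2.7 GHz bit stream in which each octet occupies 10 bits
	 * (8b/10b encoding), expressed in the kHz units the M/N code expects.
	 */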
| 
 | ||||
| 	/* [e]DP over FDI requires target mode clock instead of link clock. */ | ||||
| 	if (edp_encoder) | ||||
| 		target_clock = intel_edp_target_clock(edp_encoder, mode); | ||||
| 	else if (is_dp) | ||||
| 		target_clock = mode->clock; | ||||
| 	else | ||||
| 		target_clock = adjusted_mode->clock; | ||||
| 
 | ||||
| 	/* determine panel color depth */ | ||||
| 	temp = I915_READ(PIPECONF(pipe)); | ||||
| 	temp &= ~PIPE_BPC_MASK; | ||||
| @ -4662,16 +4900,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | ||||
| 		if (is_lvds && has_reduced_clock && i915_powersave) { | ||||
| 			I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2); | ||||
| 			intel_crtc->lowfreq_avail = true; | ||||
| 			if (HAS_PIPE_CXSR(dev)) { | ||||
| 				DRM_DEBUG_KMS("enabling CxSR downclocking\n"); | ||||
| 				pipeconf |= PIPECONF_CXSR_DOWNCLOCK; | ||||
| 			} | ||||
| 		} else { | ||||
| 			I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp); | ||||
| 			if (HAS_PIPE_CXSR(dev)) { | ||||
| 				DRM_DEBUG_KMS("disabling CxSR downclocking\n"); | ||||
| 				pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| @ -5975,7 +6205,6 @@ static int intel_gen2_queue_flip(struct drm_device *dev, | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||||
| 	unsigned long offset; | ||||
| 	u32 flip_mask; | ||||
| 	struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; | ||||
| 	int ret; | ||||
| @ -5984,9 +6213,6 @@ static int intel_gen2_queue_flip(struct drm_device *dev, | ||||
| 	if (ret) | ||||
| 		goto err; | ||||
| 
 | ||||
| 	/* Offset into the new buffer for cases of shared fbs between CRTCs */ | ||||
| 	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; | ||||
| 
 | ||||
| 	ret = intel_ring_begin(ring, 6); | ||||
| 	if (ret) | ||||
| 		goto err_unpin; | ||||
| @ -6003,7 +6229,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, | ||||
| 	intel_ring_emit(ring, MI_DISPLAY_FLIP | | ||||
| 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||||
| 	intel_ring_emit(ring, fb->pitches[0]); | ||||
| 	intel_ring_emit(ring, obj->gtt_offset + offset); | ||||
| 	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); | ||||
| 	intel_ring_emit(ring, 0); /* aux display base address, unused */ | ||||
| 	intel_ring_advance(ring); | ||||
| 	return 0; | ||||
| @ -6021,7 +6247,6 @@ static int intel_gen3_queue_flip(struct drm_device *dev, | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||||
| 	unsigned long offset; | ||||
| 	u32 flip_mask; | ||||
| 	struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; | ||||
| 	int ret; | ||||
| @ -6030,9 +6255,6 @@ static int intel_gen3_queue_flip(struct drm_device *dev, | ||||
| 	if (ret) | ||||
| 		goto err; | ||||
| 
 | ||||
| 	/* Offset into the new buffer for cases of shared fbs between CRTCs */ | ||||
| 	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; | ||||
| 
 | ||||
| 	ret = intel_ring_begin(ring, 6); | ||||
| 	if (ret) | ||||
| 		goto err_unpin; | ||||
| @ -6046,7 +6268,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev, | ||||
| 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | | ||||
| 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||||
| 	intel_ring_emit(ring, fb->pitches[0]); | ||||
| 	intel_ring_emit(ring, obj->gtt_offset + offset); | ||||
| 	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); | ||||
| 	intel_ring_emit(ring, MI_NOOP); | ||||
| 
 | ||||
| 	intel_ring_advance(ring); | ||||
| @ -6084,7 +6306,9 @@ static int intel_gen4_queue_flip(struct drm_device *dev, | ||||
| 	intel_ring_emit(ring, MI_DISPLAY_FLIP | | ||||
| 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||||
| 	intel_ring_emit(ring, fb->pitches[0]); | ||||
| 	intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode); | ||||
| 	intel_ring_emit(ring, | ||||
| 			(obj->gtt_offset + intel_crtc->dspaddr_offset) | | ||||
| 			obj->tiling_mode); | ||||
| 
 | ||||
| 	/* XXX Enabling the panel-fitter across page-flip is so far | ||||
| 	 * untested on non-native modes, so ignore it for now. | ||||
| @ -6124,7 +6348,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, | ||||
| 	intel_ring_emit(ring, MI_DISPLAY_FLIP | | ||||
| 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||||
| 	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); | ||||
| 	intel_ring_emit(ring, obj->gtt_offset); | ||||
| 	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); | ||||
| 
 | ||||
| 	/* Contrary to the suggestions in the documentation, | ||||
| 	 * "Enable Panel Fitter" does not seem to be required when page | ||||
| @ -6187,7 +6411,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, | ||||
| 
 | ||||
| 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); | ||||
| 	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); | ||||
| 	intel_ring_emit(ring, (obj->gtt_offset)); | ||||
| 	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); | ||||
| 	intel_ring_emit(ring, (MI_NOOP)); | ||||
| 	intel_ring_advance(ring); | ||||
| 	return 0; | ||||
| @ -6219,6 +6443,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | ||||
| 	unsigned long flags; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	/* Can't change pixel format via MI display flips. */ | ||||
| 	if (fb->pixel_format != crtc->fb->pixel_format) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	/* | ||||
| 	 * TILEOFF/LINOFF registers can't be changed via MI display flips. | ||||
| 	 * Note that pitch changes could also affect these registers. | ||||
| 	 */ | ||||
| 	if (INTEL_INFO(dev)->gen > 3 && | ||||
| 	    (fb->offsets[0] != crtc->fb->offsets[0] || | ||||
| 	     fb->pitches[0] != crtc->fb->pitches[0])) | ||||
| 		return -EINVAL; | ||||
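Both rejections follow from what the flip paths above were just changed to emit: MI_DISPLAY_FLIP carries only a base address, so state programmed elsewhere (pixel format, pitch, the TILEOFF/LINOFF panning registers) must already match the new framebuffer. A one-line sketch of the address every gen4+ path now emits, using names from this diff:

	/* Tile-aligned base goes into the flip command; the sub-tile x/y
	 * panning stays in TILEOFF and is deliberately left untouched. */
	u32 flip_base = obj->gtt_offset + intel_crtc->dspaddr_offset;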
| 
 | ||||
| 	work = kzalloc(sizeof *work, GFP_KERNEL); | ||||
| 	if (work == NULL) | ||||
| 		return -ENOMEM; | ||||
| @ -6249,7 +6486,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | ||||
| 	intel_fb = to_intel_framebuffer(fb); | ||||
| 	obj = intel_fb->obj; | ||||
| 
 | ||||
| 	mutex_lock(&dev->struct_mutex); | ||||
| 	ret = i915_mutex_lock_interruptible(dev); | ||||
| 	if (ret) | ||||
| 		goto cleanup; | ||||
| 
 | ||||
| 	/* Reference the objects for the scheduled work. */ | ||||
| 	drm_gem_object_reference(&work->old_fb_obj->base); | ||||
| @ -6284,6 +6523,7 @@ cleanup_pending: | ||||
| 	drm_gem_object_unreference(&obj->base); | ||||
| 	mutex_unlock(&dev->struct_mutex); | ||||
| 
 | ||||
| cleanup: | ||||
| 	spin_lock_irqsave(&dev->event_lock, flags); | ||||
| 	intel_crtc->unpin_work = NULL; | ||||
| 	spin_unlock_irqrestore(&dev->event_lock, flags); | ||||
| @ -6566,7 +6806,24 @@ static void intel_setup_outputs(struct drm_device *dev) | ||||
| 
 | ||||
| 		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) | ||||
| 			intel_dp_init(dev, PCH_DP_D); | ||||
| 	} else if (IS_VALLEYVIEW(dev)) { | ||||
| 		int found; | ||||
| 
 | ||||
| 		if (I915_READ(SDVOB) & PORT_DETECTED) { | ||||
| 			/* SDVOB multiplex with HDMIB */ | ||||
| 			found = intel_sdvo_init(dev, SDVOB, true); | ||||
| 			if (!found) | ||||
| 				intel_hdmi_init(dev, SDVOB); | ||||
| 			if (!found && (I915_READ(DP_B) & DP_DETECTED)) | ||||
| 				intel_dp_init(dev, DP_B); | ||||
| 		} | ||||
| 
 | ||||
| 		if (I915_READ(SDVOC) & PORT_DETECTED) | ||||
| 			intel_hdmi_init(dev, SDVOC); | ||||
| 
 | ||||
| 		/* Shares lanes with HDMI on SDVOC */ | ||||
| 		if (I915_READ(DP_C) & DP_DETECTED) | ||||
| 			intel_dp_init(dev, DP_C); | ||||
| 	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { | ||||
| 		bool found = false; | ||||
| 
 | ||||
| @ -6623,7 +6880,7 @@ static void intel_setup_outputs(struct drm_device *dev) | ||||
| 	/* disable all the possible outputs/crtcs before entering KMS mode */ | ||||
| 	drm_helper_disable_unused_functions(dev); | ||||
| 
 | ||||
| 	if (HAS_PCH_SPLIT(dev)) | ||||
| 	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) | ||||
| 		ironlake_init_pch_refclk(dev); | ||||
| } | ||||
| 
 | ||||
| @ -6777,9 +7034,6 @@ static void intel_init_display(struct drm_device *dev) | ||||
| 			dev_priv->display.write_eld = ironlake_write_eld; | ||||
| 		} else | ||||
| 			dev_priv->display.update_wm = NULL; | ||||
| 	} else if (IS_VALLEYVIEW(dev)) { | ||||
| 		dev_priv->display.force_wake_get = vlv_force_wake_get; | ||||
| 		dev_priv->display.force_wake_put = vlv_force_wake_put; | ||||
| 	} else if (IS_G4X(dev)) { | ||||
| 		dev_priv->display.write_eld = g4x_write_eld; | ||||
| 	} | ||||
| @ -6923,20 +7177,18 @@ static void i915_disable_vga(struct drm_device *dev) | ||||
| 
 | ||||
| void intel_modeset_init_hw(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	/* We attempt to init the necessary power wells early in initialization, | ||||
| 	 * so the subsystems that expect power to be enabled can work. | ||||
| 	 */ | ||||
| 	intel_init_power_wells(dev); | ||||
| 
 | ||||
| 	intel_prepare_ddi(dev); | ||||
| 
 | ||||
| 	intel_init_clock_gating(dev); | ||||
| 
 | ||||
| 	if (IS_IRONLAKE_M(dev)) { | ||||
| 		ironlake_enable_drps(dev); | ||||
| 		ironlake_enable_rc6(dev); | ||||
| 		intel_init_emon(dev); | ||||
| 	} | ||||
| 
 | ||||
| 	if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { | ||||
| 		gen6_enable_rps(dev_priv); | ||||
| 		gen6_update_ring_freq(dev_priv); | ||||
| 	} | ||||
| 	mutex_lock(&dev->struct_mutex); | ||||
| 	intel_enable_gt_powersave(dev); | ||||
| 	mutex_unlock(&dev->struct_mutex); | ||||
| } | ||||
| 
 | ||||
| void intel_modeset_init(struct drm_device *dev) | ||||
| @ -6958,8 +7210,6 @@ void intel_modeset_init(struct drm_device *dev) | ||||
| 
 | ||||
| 	intel_init_pm(dev); | ||||
| 
 | ||||
| 	intel_prepare_ddi(dev); | ||||
| 
 | ||||
| 	intel_init_display(dev); | ||||
| 
 | ||||
| 	if (IS_GEN2(dev)) { | ||||
| @ -6972,7 +7222,7 @@ void intel_modeset_init(struct drm_device *dev) | ||||
| 		dev->mode_config.max_width = 8192; | ||||
| 		dev->mode_config.max_height = 8192; | ||||
| 	} | ||||
| 	dev->mode_config.fb_base = dev->agp->base; | ||||
| 	dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr; | ||||
| 
 | ||||
| 	DRM_DEBUG_KMS("%d display pipe%s available.\n", | ||||
| 		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); | ||||
| @ -7025,13 +7275,9 @@ void intel_modeset_cleanup(struct drm_device *dev) | ||||
| 
 | ||||
| 	intel_disable_fbc(dev); | ||||
| 
 | ||||
| 	if (IS_IRONLAKE_M(dev)) | ||||
| 		ironlake_disable_drps(dev); | ||||
| 	if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) | ||||
| 		gen6_disable_rps(dev); | ||||
| 	intel_disable_gt_powersave(dev); | ||||
| 
 | ||||
| 	if (IS_IRONLAKE_M(dev)) | ||||
| 		ironlake_disable_rc6(dev); | ||||
| 	ironlake_teardown_rc6(dev); | ||||
| 
 | ||||
| 	if (IS_VALLEYVIEW(dev)) | ||||
| 		vlv_init_dpio(dev); | ||||
|  | ||||
| @ -155,6 +155,18 @@ intel_edp_link_config(struct intel_encoder *intel_encoder, | ||||
| 		*link_bw = 270000; | ||||
| } | ||||
| 
 | ||||
| int | ||||
| intel_edp_target_clock(struct intel_encoder *intel_encoder, | ||||
| 		       struct drm_display_mode *mode) | ||||
| { | ||||
| 	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); | ||||
| 
 | ||||
| 	if (intel_dp->panel_fixed_mode) | ||||
| 		return intel_dp->panel_fixed_mode->clock; | ||||
| 	else | ||||
| 		return mode->clock; | ||||
| } | ||||
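This helper centralizes the eDP fixed-clock special case that intel_dp_mode_fixup() no longer applies by mutating mode->clock; the ironlake_crtc_mode_set() hunk earlier in this diff consumes it as:

	if (edp_encoder)
		target_clock = intel_edp_target_clock(edp_encoder, mode);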
| 
 | ||||
| static int | ||||
| intel_dp_max_lane_count(struct intel_dp *intel_dp) | ||||
| { | ||||
| @ -225,7 +237,7 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes) | ||||
| static bool | ||||
| intel_dp_adjust_dithering(struct intel_dp *intel_dp, | ||||
| 			  struct drm_display_mode *mode, | ||||
| 			  struct drm_display_mode *adjusted_mode) | ||||
| 			  bool adjust_mode) | ||||
| { | ||||
| 	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); | ||||
| 	int max_lanes = intel_dp_max_lane_count(intel_dp); | ||||
| @ -239,8 +251,8 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp, | ||||
| 		if (mode_rate > max_rate) | ||||
| 			return false; | ||||
| 
 | ||||
| 		if (adjusted_mode) | ||||
| 			adjusted_mode->private_flags | ||||
| 		if (adjust_mode) | ||||
| 			mode->private_flags | ||||
| 				|= INTEL_MODE_DP_FORCE_6BPC; | ||||
| 
 | ||||
| 		return true; | ||||
| @ -263,7 +275,7 @@ intel_dp_mode_valid(struct drm_connector *connector, | ||||
| 			return MODE_PANEL; | ||||
| 	} | ||||
| 
 | ||||
| 	if (!intel_dp_adjust_dithering(intel_dp, mode, NULL)) | ||||
| 	if (!intel_dp_adjust_dithering(intel_dp, mode, false)) | ||||
| 		return MODE_CLOCK_HIGH; | ||||
| 
 | ||||
| 	if (mode->clock < 10000) | ||||
| @ -691,7 +703,8 @@ intel_dp_i2c_init(struct intel_dp *intel_dp, | ||||
| } | ||||
| 
 | ||||
| static bool | ||||
| intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | ||||
| intel_dp_mode_fixup(struct drm_encoder *encoder, | ||||
| 		    const struct drm_display_mode *mode, | ||||
| 		    struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	struct drm_device *dev = encoder->dev; | ||||
| @ -706,28 +719,23 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | ||||
| 		intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode); | ||||
| 		intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, | ||||
| 					mode, adjusted_mode); | ||||
| 		/* | ||||
| 		 * the mode->clock is used to calculate the Data&Link M/N | ||||
| 		 * of the pipe. For the eDP the fixed clock should be used. | ||||
| 		 */ | ||||
| 		mode->clock = intel_dp->panel_fixed_mode->clock; | ||||
| 	} | ||||
| 
 | ||||
| 	if (mode->flags & DRM_MODE_FLAG_DBLCLK) | ||||
| 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) | ||||
| 		return false; | ||||
| 
 | ||||
| 	DRM_DEBUG_KMS("DP link computation with max lane count %i " | ||||
| 		      "max bw %02x pixel clock %iKHz\n", | ||||
| 		      max_lane_count, bws[max_clock], mode->clock); | ||||
| 		      max_lane_count, bws[max_clock], adjusted_mode->clock); | ||||
| 
 | ||||
| 	if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode)) | ||||
| 	if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true)) | ||||
| 		return false; | ||||
| 
 | ||||
| 	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24; | ||||
| 	mode_rate = intel_dp_link_required(mode->clock, bpp); | ||||
| 	mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); | ||||
| 
 | ||||
| 	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | ||||
| 		for (clock = 0; clock <= max_clock; clock++) { | ||||
| 	for (clock = 0; clock <= max_clock; clock++) { | ||||
| 		for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | ||||
| 			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); | ||||
| 
 | ||||
| 			if (mode_rate <= link_avail) { | ||||
| @ -786,8 +794,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | ||||
| 		 struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	struct drm_device *dev = crtc->dev; | ||||
| 	struct drm_mode_config *mode_config = &dev->mode_config; | ||||
| 	struct drm_encoder *encoder; | ||||
| 	struct intel_encoder *encoder; | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||||
| 	int lane_count = 4; | ||||
| @ -797,13 +804,9 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | ||||
| 	/* | ||||
| 	 * Find the lane count in the intel_encoder private | ||||
| 	 */ | ||||
| 	list_for_each_entry(encoder, &mode_config->encoder_list, head) { | ||||
| 		struct intel_dp *intel_dp; | ||||
| 	for_each_encoder_on_crtc(dev, crtc, encoder) { | ||||
| 		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | ||||
| 
 | ||||
| 		if (encoder->crtc != crtc) | ||||
| 			continue; | ||||
| 
 | ||||
| 		intel_dp = enc_to_intel_dp(encoder); | ||||
| 		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || | ||||
| 		    intel_dp->base.type == INTEL_OUTPUT_EDP) | ||||
| 		{ | ||||
| @ -1768,7 +1771,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | ||||
| 		for (i = 0; i < intel_dp->lane_count; i++) | ||||
| 			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | ||||
| 				break; | ||||
| 		if (i == intel_dp->lane_count) { | ||||
| 		if (i == intel_dp->lane_count && voltage_tries == 5) { | ||||
| 			++loop_tries; | ||||
| 			if (loop_tries == 5) { | ||||
| 				DRM_DEBUG_KMS("too many full retries, give up\n"); | ||||
| @ -1922,7 +1925,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) | ||||
| 			DP |= DP_LINK_TRAIN_OFF; | ||||
| 	} | ||||
| 
 | ||||
| 	if (!HAS_PCH_CPT(dev) && | ||||
| 	if (HAS_PCH_IBX(dev) && | ||||
| 	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { | ||||
| 		struct drm_crtc *crtc = intel_dp->base.base.crtc; | ||||
| 
 | ||||
| @ -2099,25 +2102,23 @@ g4x_dp_detect(struct intel_dp *intel_dp) | ||||
| { | ||||
| 	struct drm_device *dev = intel_dp->base.base.dev; | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	uint32_t temp, bit; | ||||
| 	uint32_t bit; | ||||
| 
 | ||||
| 	switch (intel_dp->output_reg) { | ||||
| 	case DP_B: | ||||
| 		bit = DPB_HOTPLUG_INT_STATUS; | ||||
| 		bit = DPB_HOTPLUG_LIVE_STATUS; | ||||
| 		break; | ||||
| 	case DP_C: | ||||
| 		bit = DPC_HOTPLUG_INT_STATUS; | ||||
| 		bit = DPC_HOTPLUG_LIVE_STATUS; | ||||
| 		break; | ||||
| 	case DP_D: | ||||
| 		bit = DPD_HOTPLUG_INT_STATUS; | ||||
| 		bit = DPD_HOTPLUG_LIVE_STATUS; | ||||
| 		break; | ||||
| 	default: | ||||
| 		return connector_status_unknown; | ||||
| 	} | ||||
| 
 | ||||
| 	temp = I915_READ(PORT_HOTPLUG_STAT); | ||||
| 
 | ||||
| 	if ((temp & bit) == 0) | ||||
| 	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) | ||||
| 		return connector_status_disconnected; | ||||
| 
 | ||||
| 	return intel_dp_detect_dpcd(intel_dp); | ||||
| @ -2399,16 +2400,11 @@ int | ||||
| intel_trans_dp_port_sel(struct drm_crtc *crtc) | ||||
| { | ||||
| 	struct drm_device *dev = crtc->dev; | ||||
| 	struct drm_mode_config *mode_config = &dev->mode_config; | ||||
| 	struct drm_encoder *encoder; | ||||
| 	struct intel_encoder *encoder; | ||||
| 
 | ||||
| 	list_for_each_entry(encoder, &mode_config->encoder_list, head) { | ||||
| 		struct intel_dp *intel_dp; | ||||
| 	for_each_encoder_on_crtc(dev, crtc, encoder) { | ||||
| 		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | ||||
| 
 | ||||
| 		if (encoder->crtc != crtc) | ||||
| 			continue; | ||||
| 
 | ||||
| 		intel_dp = enc_to_intel_dp(encoder); | ||||
| 		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || | ||||
| 		    intel_dp->base.type == INTEL_OUTPUT_EDP) | ||||
| 			return intel_dp->output_reg; | ||||
| @ -2520,19 +2516,19 @@ intel_dp_init(struct drm_device *dev, int output_reg) | ||||
| 		case DP_B: | ||||
| 		case PCH_DP_B: | ||||
| 			dev_priv->hotplug_supported_mask |= | ||||
| 				HDMIB_HOTPLUG_INT_STATUS; | ||||
| 				DPB_HOTPLUG_INT_STATUS; | ||||
| 			name = "DPDDC-B"; | ||||
| 			break; | ||||
| 		case DP_C: | ||||
| 		case PCH_DP_C: | ||||
| 			dev_priv->hotplug_supported_mask |= | ||||
| 				HDMIC_HOTPLUG_INT_STATUS; | ||||
| 				DPC_HOTPLUG_INT_STATUS; | ||||
| 			name = "DPDDC-C"; | ||||
| 			break; | ||||
| 		case DP_D: | ||||
| 		case PCH_DP_D: | ||||
| 			dev_priv->hotplug_supported_mask |= | ||||
| 				HDMID_HOTPLUG_INT_STATUS; | ||||
| 				DPD_HOTPLUG_INT_STATUS; | ||||
| 			name = "DPDDC-D"; | ||||
| 			break; | ||||
| 	} | ||||
|  | ||||
| @ -169,6 +169,7 @@ struct intel_crtc { | ||||
| 	u8 lut_r[256], lut_g[256], lut_b[256]; | ||||
| 	int dpms_mode; | ||||
| 	bool active; /* is the crtc on? independent of the dpms mode */ | ||||
| 	bool primary_disabled; /* is the crtc obscured by a plane? */ | ||||
| 	bool busy; /* is scanout buffer being updated frequently? */ | ||||
| 	struct timer_list idle_timer; | ||||
| 	bool lowfreq_avail; | ||||
| @ -176,6 +177,11 @@ struct intel_crtc { | ||||
| 	struct intel_unpin_work *unpin_work; | ||||
| 	int fdi_lanes; | ||||
| 
 | ||||
| 	/* Display surface base address adjustment for pageflips. Note that on | ||||
| 	 * gen4+ this only adjusts up to a tile, offsets within a tile are | ||||
| 	 * handled in the hw itself (with the TILEOFF register). */ | ||||
| 	unsigned long dspaddr_offset; | ||||
| 
 | ||||
| 	struct drm_i915_gem_object *cursor_bo; | ||||
| 	uint32_t cursor_addr; | ||||
| 	int16_t cursor_x, cursor_y; | ||||
| @ -191,7 +197,6 @@ struct intel_plane { | ||||
| 	struct drm_plane base; | ||||
| 	enum pipe pipe; | ||||
| 	struct drm_i915_gem_object *obj; | ||||
| 	bool primary_disabled; | ||||
| 	int max_downscale; | ||||
| 	u32 lut_r[1024], lut_g[1024], lut_b[1024]; | ||||
| 	void (*update_plane)(struct drm_plane *plane, | ||||
| @ -301,6 +306,8 @@ struct intel_hdmi { | ||||
| 	enum hdmi_force_audio force_audio; | ||||
| 	void (*write_infoframe)(struct drm_encoder *encoder, | ||||
| 				struct dip_infoframe *frame); | ||||
| 	void (*set_infoframes)(struct drm_encoder *encoder, | ||||
| 			       struct drm_display_mode *adjusted_mode); | ||||
| }; | ||||
| 
 | ||||
| static inline struct drm_crtc * | ||||
| @ -335,7 +342,6 @@ struct intel_fbc_work { | ||||
| }; | ||||
| 
 | ||||
| int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); | ||||
| extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); | ||||
| 
 | ||||
| extern void intel_attach_force_audio_property(struct drm_connector *connector); | ||||
| extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); | ||||
| @ -343,9 +349,6 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector) | ||||
| extern void intel_crt_init(struct drm_device *dev); | ||||
| extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); | ||||
| extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); | ||||
| extern void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, | ||||
| 			    struct drm_display_mode *adjusted_mode); | ||||
| extern void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder); | ||||
| extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); | ||||
| extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, | ||||
| 			    bool is_sdvob); | ||||
| @ -360,6 +363,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | ||||
| 		 struct drm_display_mode *adjusted_mode); | ||||
| extern bool intel_dpd_is_edp(struct drm_device *dev); | ||||
| extern void intel_edp_link_config(struct intel_encoder *, int *, int *); | ||||
| extern int intel_edp_target_clock(struct intel_encoder *, | ||||
| 				  struct drm_display_mode *mode); | ||||
| extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); | ||||
| extern int intel_plane_init(struct drm_device *dev, enum pipe pipe); | ||||
| extern void intel_flush_display_plane(struct drm_i915_private *dev_priv, | ||||
| @ -372,13 +377,14 @@ extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, | ||||
| 				   struct drm_display_mode *adjusted_mode); | ||||
| extern void intel_pch_panel_fitting(struct drm_device *dev, | ||||
| 				    int fitting_mode, | ||||
| 				    struct drm_display_mode *mode, | ||||
| 				    const struct drm_display_mode *mode, | ||||
| 				    struct drm_display_mode *adjusted_mode); | ||||
| extern u32 intel_panel_get_max_backlight(struct drm_device *dev); | ||||
| extern u32 intel_panel_get_backlight(struct drm_device *dev); | ||||
| extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); | ||||
| extern int intel_panel_setup_backlight(struct drm_device *dev); | ||||
| extern void intel_panel_enable_backlight(struct drm_device *dev); | ||||
| extern void intel_panel_enable_backlight(struct drm_device *dev, | ||||
| 					 enum pipe pipe); | ||||
| extern void intel_panel_disable_backlight(struct drm_device *dev); | ||||
| extern void intel_panel_destroy_backlight(struct drm_device *dev); | ||||
| extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); | ||||
| @ -423,9 +429,6 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | ||||
| extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | ||||
| 				    u16 *blue, int regno); | ||||
| extern void intel_enable_clock_gating(struct drm_device *dev); | ||||
| extern void ironlake_disable_rc6(struct drm_device *dev); | ||||
| extern void ironlake_enable_drps(struct drm_device *dev); | ||||
| extern void ironlake_disable_drps(struct drm_device *dev); | ||||
| 
 | ||||
| extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, | ||||
| 				      struct drm_i915_gem_object *obj, | ||||
| @ -492,10 +495,11 @@ extern void intel_update_fbc(struct drm_device *dev); | ||||
| extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); | ||||
| extern void intel_gpu_ips_teardown(void); | ||||
| 
 | ||||
| extern void gen6_enable_rps(struct drm_i915_private *dev_priv); | ||||
| extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv); | ||||
| extern void gen6_disable_rps(struct drm_device *dev); | ||||
| extern void intel_init_emon(struct drm_device *dev); | ||||
| extern void intel_init_power_wells(struct drm_device *dev); | ||||
| extern void intel_enable_gt_powersave(struct drm_device *dev); | ||||
| extern void intel_disable_gt_powersave(struct drm_device *dev); | ||||
| extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); | ||||
| extern void ironlake_teardown_rc6(struct drm_device *dev); | ||||
| 
 | ||||
| extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode); | ||||
| extern void intel_ddi_mode_set(struct drm_encoder *encoder, | ||||
|  | ||||
| @ -136,7 +136,7 @@ static int intel_dvo_mode_valid(struct drm_connector *connector, | ||||
| } | ||||
| 
 | ||||
| static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, | ||||
| 				 struct drm_display_mode *mode, | ||||
| 				 const struct drm_display_mode *mode, | ||||
| 				 struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); | ||||
|  | ||||
| @ -65,7 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	struct fb_info *info; | ||||
| 	struct drm_framebuffer *fb; | ||||
| 	struct drm_mode_fb_cmd2 mode_cmd; | ||||
| 	struct drm_mode_fb_cmd2 mode_cmd = {}; | ||||
| 	struct drm_i915_gem_object *obj; | ||||
| 	struct device *device = &dev->pdev->dev; | ||||
| 	int size, ret; | ||||
| @ -140,7 +140,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | ||||
| 	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; | ||||
| 	info->fix.smem_len = size; | ||||
| 
 | ||||
| 	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size); | ||||
| 	info->screen_base = | ||||
| 		ioremap_wc(dev_priv->mm.gtt_base_addr + obj->gtt_offset, | ||||
| 			   size); | ||||
| 	if (!info->screen_base) { | ||||
| 		ret = -ENOSPC; | ||||
| 		goto out_unpin; | ||||
|  | ||||
| @ -37,6 +37,19 @@ | ||||
| #include "i915_drm.h" | ||||
| #include "i915_drv.h" | ||||
| 
 | ||||
| static void | ||||
| assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) | ||||
| { | ||||
| 	struct drm_device *dev = intel_hdmi->base.base.dev; | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	uint32_t enabled_bits; | ||||
| 
 | ||||
| 	enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; | ||||
| 
 | ||||
| 	WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits, | ||||
| 	     "HDMI port enabled, expecting disabled\n"); | ||||
| } | ||||
| 
 | ||||
| struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) | ||||
| { | ||||
| 	return container_of(encoder, struct intel_hdmi, base.base); | ||||
| @ -121,36 +134,31 @@ static void g4x_write_infoframe(struct drm_encoder *encoder, | ||||
| 	uint32_t *data = (uint32_t *)frame; | ||||
| 	struct drm_device *dev = encoder->dev; | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||||
| 	u32 val = I915_READ(VIDEO_DIP_CTL); | ||||
| 	unsigned i, len = DIP_HEADER_SIZE + frame->len; | ||||
| 
 | ||||
| 	val &= ~VIDEO_DIP_PORT_MASK; | ||||
| 	if (intel_hdmi->sdvox_reg == SDVOB) | ||||
| 		val |= VIDEO_DIP_PORT_B; | ||||
| 	else if (intel_hdmi->sdvox_reg == SDVOC) | ||||
| 		val |= VIDEO_DIP_PORT_C; | ||||
| 	else | ||||
| 		return; | ||||
| 	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); | ||||
| 
 | ||||
| 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ | ||||
| 	val |= g4x_infoframe_index(frame); | ||||
| 
 | ||||
| 	val &= ~g4x_infoframe_enable(frame); | ||||
| 	val |= VIDEO_DIP_ENABLE; | ||||
| 
 | ||||
| 	I915_WRITE(VIDEO_DIP_CTL, val); | ||||
| 
 | ||||
| 	mmiowb(); | ||||
| 	for (i = 0; i < len; i += 4) { | ||||
| 		I915_WRITE(VIDEO_DIP_DATA, *data); | ||||
| 		data++; | ||||
| 	} | ||||
| 	mmiowb(); | ||||
| 
 | ||||
| 	val |= g4x_infoframe_enable(frame); | ||||
| 	val &= ~VIDEO_DIP_FREQ_MASK; | ||||
| 	val |= VIDEO_DIP_FREQ_VSYNC; | ||||
| 
 | ||||
| 	I915_WRITE(VIDEO_DIP_CTL, val); | ||||
| 	POSTING_READ(VIDEO_DIP_CTL); | ||||
| } | ||||
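After this commit the g4x, ibx, cpt and vlv writers all share the shape below (cpt additionally keeps the AVI frame enabled throughout, per its spec comment, and the hsw variant has no select or frequency fields). A condensed sketch, with reg and data_reg standing in for the per-platform registers:

	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf);	/* select DIP, reset data offset */
	val |= g4x_infoframe_index(frame);
	val &= ~g4x_infoframe_enable(frame);	/* mask the frame while rewriting */
	I915_WRITE(reg, val);
	for (i = 0; i < len; i += 4)		/* header + payload, one dword at a time */
		I915_WRITE(data_reg, *data++);
	val |= g4x_infoframe_enable(frame);	/* unmask, transmit every VSync */
	val &= ~VIDEO_DIP_FREQ_MASK;
	val |= VIDEO_DIP_FREQ_VSYNC;
	I915_WRITE(reg, val);
	POSTING_READ(reg);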
| 
 | ||||
| static void ibx_write_infoframe(struct drm_encoder *encoder, | ||||
| @ -160,46 +168,32 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, | ||||
| 	struct drm_device *dev = encoder->dev; | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||||
| 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||||
| 	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); | ||||
| 	unsigned i, len = DIP_HEADER_SIZE + frame->len; | ||||
| 	u32 val = I915_READ(reg); | ||||
| 
 | ||||
| 	val &= ~VIDEO_DIP_PORT_MASK; | ||||
| 	switch (intel_hdmi->sdvox_reg) { | ||||
| 	case HDMIB: | ||||
| 		val |= VIDEO_DIP_PORT_B; | ||||
| 		break; | ||||
| 	case HDMIC: | ||||
| 		val |= VIDEO_DIP_PORT_C; | ||||
| 		break; | ||||
| 	case HDMID: | ||||
| 		val |= VIDEO_DIP_PORT_D; | ||||
| 		break; | ||||
| 	default: | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	intel_wait_for_vblank(dev, intel_crtc->pipe); | ||||
| 	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); | ||||
| 
 | ||||
| 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ | ||||
| 	val |= g4x_infoframe_index(frame); | ||||
| 
 | ||||
| 	val &= ~g4x_infoframe_enable(frame); | ||||
| 	val |= VIDEO_DIP_ENABLE; | ||||
| 
 | ||||
| 	I915_WRITE(reg, val); | ||||
| 
 | ||||
| 	mmiowb(); | ||||
| 	for (i = 0; i < len; i += 4) { | ||||
| 		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); | ||||
| 		data++; | ||||
| 	} | ||||
| 	mmiowb(); | ||||
| 
 | ||||
| 	val |= g4x_infoframe_enable(frame); | ||||
| 	val &= ~VIDEO_DIP_FREQ_MASK; | ||||
| 	val |= VIDEO_DIP_FREQ_VSYNC; | ||||
| 
 | ||||
| 	I915_WRITE(reg, val); | ||||
| 	POSTING_READ(reg); | ||||
| } | ||||
| 
 | ||||
| static void cpt_write_infoframe(struct drm_encoder *encoder, | ||||
| @ -213,32 +207,31 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, | ||||
| 	unsigned i, len = DIP_HEADER_SIZE + frame->len; | ||||
| 	u32 val = I915_READ(reg); | ||||
| 
 | ||||
| 	intel_wait_for_vblank(dev, intel_crtc->pipe); | ||||
| 	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); | ||||
| 
 | ||||
| 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ | ||||
| 	val |= g4x_infoframe_index(frame); | ||||
| 
 | ||||
| 	/* The DIP control register spec says that we need to update the AVI | ||||
| 	 * infoframe without clearing its enable bit */ | ||||
| 	if (frame->type == DIP_TYPE_AVI) | ||||
| 		val |= VIDEO_DIP_ENABLE_AVI; | ||||
| 	else | ||||
| 	if (frame->type != DIP_TYPE_AVI) | ||||
| 		val &= ~g4x_infoframe_enable(frame); | ||||
| 
 | ||||
| 	val |= VIDEO_DIP_ENABLE; | ||||
| 
 | ||||
| 	I915_WRITE(reg, val); | ||||
| 
 | ||||
| 	mmiowb(); | ||||
| 	for (i = 0; i < len; i += 4) { | ||||
| 		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); | ||||
| 		data++; | ||||
| 	} | ||||
| 	mmiowb(); | ||||
| 
 | ||||
| 	val |= g4x_infoframe_enable(frame); | ||||
| 	val &= ~VIDEO_DIP_FREQ_MASK; | ||||
| 	val |= VIDEO_DIP_FREQ_VSYNC; | ||||
| 
 | ||||
| 	I915_WRITE(reg, val); | ||||
| 	POSTING_READ(reg); | ||||
| } | ||||
| 
 | ||||
| static void vlv_write_infoframe(struct drm_encoder *encoder, | ||||
| @ -252,26 +245,28 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, | ||||
| 	unsigned i, len = DIP_HEADER_SIZE + frame->len; | ||||
| 	u32 val = I915_READ(reg); | ||||
| 
 | ||||
| 	intel_wait_for_vblank(dev, intel_crtc->pipe); | ||||
| 	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); | ||||
| 
 | ||||
| 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ | ||||
| 	val |= g4x_infoframe_index(frame); | ||||
| 
 | ||||
| 	val &= ~g4x_infoframe_enable(frame); | ||||
| 	val |= VIDEO_DIP_ENABLE; | ||||
| 
 | ||||
| 	I915_WRITE(reg, val); | ||||
| 
 | ||||
| 	mmiowb(); | ||||
| 	for (i = 0; i < len; i += 4) { | ||||
| 		I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data); | ||||
| 		data++; | ||||
| 	} | ||||
| 	mmiowb(); | ||||
| 
 | ||||
| 	val |= g4x_infoframe_enable(frame); | ||||
| 	val &= ~VIDEO_DIP_FREQ_MASK; | ||||
| 	val |= VIDEO_DIP_FREQ_VSYNC; | ||||
| 
 | ||||
| 	I915_WRITE(reg, val); | ||||
| 	POSTING_READ(reg); | ||||
| } | ||||
| 
 | ||||
| static void hsw_write_infoframe(struct drm_encoder *encoder, | ||||
| @ -289,18 +284,19 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, | ||||
| 	if (data_reg == 0) | ||||
| 		return; | ||||
| 
 | ||||
| 	intel_wait_for_vblank(dev, intel_crtc->pipe); | ||||
| 
 | ||||
| 	val &= ~hsw_infoframe_enable(frame); | ||||
| 	I915_WRITE(ctl_reg, val); | ||||
| 
 | ||||
| 	mmiowb(); | ||||
| 	for (i = 0; i < len; i += 4) { | ||||
| 		I915_WRITE(data_reg + i, *data); | ||||
| 		data++; | ||||
| 	} | ||||
| 	mmiowb(); | ||||
| 
 | ||||
| 	val |= hsw_infoframe_enable(frame); | ||||
| 	I915_WRITE(ctl_reg, val); | ||||
| 	POSTING_READ(ctl_reg); | ||||
| } | ||||
| 
 | ||||
| static void intel_set_infoframe(struct drm_encoder *encoder, | ||||
| @ -308,14 +304,11 @@ static void intel_set_infoframe(struct drm_encoder *encoder, | ||||
| { | ||||
| 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||||
| 
 | ||||
| 	if (!intel_hdmi->has_hdmi_sink) | ||||
| 		return; | ||||
| 
 | ||||
| 	intel_dip_infoframe_csum(frame); | ||||
| 	intel_hdmi->write_infoframe(encoder, frame); | ||||
| } | ||||
| 
 | ||||
| void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, | ||||
| static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, | ||||
| 					 struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	struct dip_infoframe avi_if = { | ||||
| @ -330,7 +323,7 @@ void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, | ||||
| 	intel_set_infoframe(encoder, &avi_if); | ||||
| } | ||||
| 
 | ||||
| void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) | ||||
| static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) | ||||
| { | ||||
| 	struct dip_infoframe spd_if; | ||||
| 
 | ||||
| @ -345,6 +338,223 @@ void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) | ||||
| 	intel_set_infoframe(encoder, &spd_if); | ||||
| } | ||||
| 
 | ||||
| static void g4x_set_infoframes(struct drm_encoder *encoder, | ||||
| 			       struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = encoder->dev->dev_private; | ||||
| 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||||
| 	u32 reg = VIDEO_DIP_CTL; | ||||
| 	u32 val = I915_READ(reg); | ||||
| 	u32 port; | ||||
| 
 | ||||
| 	assert_hdmi_port_disabled(intel_hdmi); | ||||
| 
 | ||||
| 	/* If the registers were not initialized yet, they might be zeroes, | ||||
| 	 * which means we're selecting the AVI DIP and we're setting its | ||||
| 	 * frequency to once. This seems to really confuse the HW and make | ||||
| 	 * things stop working (the register spec says the AVI always needs to | ||||
| 	 * be sent every VSync). So here we avoid writing to the register more | ||||
| 	 * than we need and also explicitly select the AVI DIP and explicitly | ||||
| 	 * set its frequency to every VSync. Avoiding the double write seems to | ||||
| 	 * be enough to solve the problem, but being defensive shouldn't hurt us | ||||
| 	 * either. */ | ||||
| 	val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; | ||||
| 
 | ||||
| 	if (!intel_hdmi->has_hdmi_sink) { | ||||
| 		if (!(val & VIDEO_DIP_ENABLE)) | ||||
| 			return; | ||||
| 		val &= ~VIDEO_DIP_ENABLE; | ||||
| 		I915_WRITE(reg, val); | ||||
| 		POSTING_READ(reg); | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	switch (intel_hdmi->sdvox_reg) { | ||||
| 	case SDVOB: | ||||
| 		port = VIDEO_DIP_PORT_B; | ||||
| 		break; | ||||
| 	case SDVOC: | ||||
| 		port = VIDEO_DIP_PORT_C; | ||||
| 		break; | ||||
| 	default: | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	if (port != (val & VIDEO_DIP_PORT_MASK)) { | ||||
| 		if (val & VIDEO_DIP_ENABLE) { | ||||
| 			val &= ~VIDEO_DIP_ENABLE; | ||||
| 			I915_WRITE(reg, val); | ||||
| 			POSTING_READ(reg); | ||||
| 		} | ||||
| 		val &= ~VIDEO_DIP_PORT_MASK; | ||||
| 		val |= port; | ||||
| 	} | ||||
| 
 | ||||
| 	val |= VIDEO_DIP_ENABLE; | ||||
| 	val &= ~VIDEO_DIP_ENABLE_VENDOR; | ||||
| 
 | ||||
| 	I915_WRITE(reg, val); | ||||
| 	POSTING_READ(reg); | ||||
| 
 | ||||
| 	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); | ||||
| 	intel_hdmi_set_spd_infoframe(encoder); | ||||
| } | ||||
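This is the function intel_hdmi_mode_set() now ends with on pre-PCH hardware, replacing the direct AVI/SPD calls (see the mode_set hunk further down):

	intel_hdmi->set_infoframes(encoder, adjusted_mode);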
| 
 | ||||
| static void ibx_set_infoframes(struct drm_encoder *encoder, | ||||
| 			       struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = encoder->dev->dev_private; | ||||
| 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||||
| 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||||
| 	u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); | ||||
| 	u32 val = I915_READ(reg); | ||||
| 	u32 port; | ||||
| 
 | ||||
| 	assert_hdmi_port_disabled(intel_hdmi); | ||||
| 
 | ||||
| 	/* See the big comment in g4x_set_infoframes() */ | ||||
| 	val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; | ||||
| 
 | ||||
| 	if (!intel_hdmi->has_hdmi_sink) { | ||||
| 		if (!(val & VIDEO_DIP_ENABLE)) | ||||
| 			return; | ||||
| 		val &= ~VIDEO_DIP_ENABLE; | ||||
| 		I915_WRITE(reg, val); | ||||
| 		POSTING_READ(reg); | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	switch (intel_hdmi->sdvox_reg) { | ||||
| 	case HDMIB: | ||||
| 		port = VIDEO_DIP_PORT_B; | ||||
| 		break; | ||||
| 	case HDMIC: | ||||
| 		port = VIDEO_DIP_PORT_C; | ||||
| 		break; | ||||
| 	case HDMID: | ||||
| 		port = VIDEO_DIP_PORT_D; | ||||
| 		break; | ||||
| 	default: | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	if (port != (val & VIDEO_DIP_PORT_MASK)) { | ||||
| 		if (val & VIDEO_DIP_ENABLE) { | ||||
| 			val &= ~VIDEO_DIP_ENABLE; | ||||
| 			I915_WRITE(reg, val); | ||||
| 			POSTING_READ(reg); | ||||
| 		} | ||||
| 		val &= ~VIDEO_DIP_PORT_MASK; | ||||
| 		val |= port; | ||||
| 	} | ||||
| 
 | ||||
| 	val |= VIDEO_DIP_ENABLE; | ||||
| 	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | | ||||
| 		 VIDEO_DIP_ENABLE_GCP); | ||||
| 
 | ||||
| 	I915_WRITE(reg, val); | ||||
| 	POSTING_READ(reg); | ||||
| 
 | ||||
| 	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); | ||||
| 	intel_hdmi_set_spd_infoframe(encoder); | ||||
| } | ||||
| 
 | ||||
| static void cpt_set_infoframes(struct drm_encoder *encoder, | ||||
| 			       struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = encoder->dev->dev_private; | ||||
| 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||||
| 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||||
| 	u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); | ||||
| 	u32 val = I915_READ(reg); | ||||
| 
 | ||||
| 	assert_hdmi_port_disabled(intel_hdmi); | ||||
| 
 | ||||
| 	/* See the big comment in g4x_set_infoframes() */ | ||||
| 	val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; | ||||
| 
 | ||||
| 	if (!intel_hdmi->has_hdmi_sink) { | ||||
| 		if (!(val & VIDEO_DIP_ENABLE)) | ||||
| 			return; | ||||
| 		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI); | ||||
| 		I915_WRITE(reg, val); | ||||
| 		POSTING_READ(reg); | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	/* Set both together, unset both together: see the spec. */ | ||||
| 	val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI; | ||||
| 	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | | ||||
| 		 VIDEO_DIP_ENABLE_GCP); | ||||
| 
 | ||||
| 	I915_WRITE(reg, val); | ||||
| 	POSTING_READ(reg); | ||||
| 
 | ||||
| 	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); | ||||
| 	intel_hdmi_set_spd_infoframe(encoder); | ||||
| } | ||||
| 
 | ||||
| static void vlv_set_infoframes(struct drm_encoder *encoder, | ||||
| 			       struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = encoder->dev->dev_private; | ||||
| 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||||
| 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||||
| 	u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); | ||||
| 	u32 val = I915_READ(reg); | ||||
| 
 | ||||
| 	assert_hdmi_port_disabled(intel_hdmi); | ||||
| 
 | ||||
| 	/* See the big comment in g4x_set_infoframes() */ | ||||
| 	val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; | ||||
| 
 | ||||
| 	if (!intel_hdmi->has_hdmi_sink) { | ||||
| 		if (!(val & VIDEO_DIP_ENABLE)) | ||||
| 			return; | ||||
| 		val &= ~VIDEO_DIP_ENABLE; | ||||
| 		I915_WRITE(reg, val); | ||||
| 		POSTING_READ(reg); | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	val |= VIDEO_DIP_ENABLE; | ||||
| 	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | | ||||
| 		 VIDEO_DIP_ENABLE_GCP); | ||||
| 
 | ||||
| 	I915_WRITE(reg, val); | ||||
| 	POSTING_READ(reg); | ||||
| 
 | ||||
| 	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); | ||||
| 	intel_hdmi_set_spd_infoframe(encoder); | ||||
| } | ||||
| 
 | ||||
| static void hsw_set_infoframes(struct drm_encoder *encoder, | ||||
| 			       struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = encoder->dev->dev_private; | ||||
| 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||||
| 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||||
| 	u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe); | ||||
| 	u32 val = I915_READ(reg); | ||||
| 
 | ||||
| 	assert_hdmi_port_disabled(intel_hdmi); | ||||
| 
 | ||||
| 	if (!intel_hdmi->has_hdmi_sink) { | ||||
| 		I915_WRITE(reg, 0); | ||||
| 		POSTING_READ(reg); | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW | | ||||
| 		 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW); | ||||
| 
 | ||||
| 	I915_WRITE(reg, val); | ||||
| 	POSTING_READ(reg); | ||||
| 
 | ||||
| 	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); | ||||
| 	intel_hdmi_set_spd_infoframe(encoder); | ||||
| } | ||||
| 
 | ||||
| static void intel_hdmi_mode_set(struct drm_encoder *encoder, | ||||
| 				struct drm_display_mode *mode, | ||||
| 				struct drm_display_mode *adjusted_mode) | ||||
| @ -355,7 +565,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | ||||
| 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||||
| 	u32 sdvox; | ||||
| 
 | ||||
| 	sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; | ||||
| 	sdvox = SDVO_ENCODING_HDMI; | ||||
| 	if (!HAS_PCH_SPLIT(dev)) | ||||
| 		sdvox |= intel_hdmi->color_range; | ||||
| 	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | ||||
| @ -382,14 +592,13 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | ||||
| 
 | ||||
| 	if (HAS_PCH_CPT(dev)) | ||||
| 		sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); | ||||
| 	else if (intel_crtc->pipe == 1) | ||||
| 	else if (intel_crtc->pipe == PIPE_B) | ||||
| 		sdvox |= SDVO_PIPE_B_SELECT; | ||||
| 
 | ||||
| 	I915_WRITE(intel_hdmi->sdvox_reg, sdvox); | ||||
| 	POSTING_READ(intel_hdmi->sdvox_reg); | ||||
| 
 | ||||
| 	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); | ||||
| 	intel_hdmi_set_spd_infoframe(encoder); | ||||
| 	intel_hdmi->set_infoframes(encoder, adjusted_mode); | ||||
| } | ||||
| 
 | ||||
| static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) | ||||
| @ -405,6 +614,36 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) | ||||
| 
 | ||||
| 	temp = I915_READ(intel_hdmi->sdvox_reg); | ||||
| 
 | ||||
| 	/* HW workaround for IBX, we need to move the port to transcoder A | ||||
| 	 * before disabling it. */ | ||||
| 	if (HAS_PCH_IBX(dev)) { | ||||
| 		struct drm_crtc *crtc = encoder->crtc; | ||||
| 		int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1; | ||||
| 
 | ||||
| 		if (mode != DRM_MODE_DPMS_ON) { | ||||
| 			if (temp & SDVO_PIPE_B_SELECT) { | ||||
| 				temp &= ~SDVO_PIPE_B_SELECT; | ||||
| 				I915_WRITE(intel_hdmi->sdvox_reg, temp); | ||||
| 				POSTING_READ(intel_hdmi->sdvox_reg); | ||||
| 
 | ||||
| 				/* Again we need to write this twice. */ | ||||
| 				I915_WRITE(intel_hdmi->sdvox_reg, temp); | ||||
| 				POSTING_READ(intel_hdmi->sdvox_reg); | ||||
| 
 | ||||
| 				/* Transcoder selection bits only update | ||||
| 				 * effectively on vblank. */ | ||||
| 				if (crtc) | ||||
| 					intel_wait_for_vblank(dev, pipe); | ||||
| 				else | ||||
| 					msleep(50); | ||||
| 			} | ||||
| 		} else { | ||||
| 			/* Restore the transcoder select bit. */ | ||||
| 			if (pipe == PIPE_B) | ||||
| 				enable_bits |= SDVO_PIPE_B_SELECT; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	/* HW workaround, need to toggle enable bit off and on for 12bpc, but | ||||
| 	 * we do this anyway, which proves more stable in testing. | ||||
| 	 */ | ||||
| @ -446,12 +685,33 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector, | ||||
| } | ||||
| 
 | ||||
| static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, | ||||
| 				  struct drm_display_mode *mode, | ||||
| 				  const struct drm_display_mode *mode, | ||||
| 				  struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	return true; | ||||
| } | ||||
| 
 | ||||
| static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi) | ||||
| { | ||||
| 	struct drm_device *dev = intel_hdmi->base.base.dev; | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	uint32_t bit; | ||||
| 
 | ||||
| 	switch (intel_hdmi->sdvox_reg) { | ||||
| 	case SDVOB: | ||||
| 		bit = HDMIB_HOTPLUG_LIVE_STATUS; | ||||
| 		break; | ||||
| 	case SDVOC: | ||||
| 		bit = HDMIC_HOTPLUG_LIVE_STATUS; | ||||
| 		break; | ||||
| 	default: | ||||
| 		bit = 0; | ||||
| 		break; | ||||
| 	} | ||||
| 
 | ||||
| 	return I915_READ(PORT_HOTPLUG_STAT) & bit; | ||||
| } | ||||
| 
 | ||||
| static enum drm_connector_status | ||||
| intel_hdmi_detect(struct drm_connector *connector, bool force) | ||||
| { | ||||
| @ -460,6 +720,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) | ||||
| 	struct edid *edid; | ||||
| 	enum drm_connector_status status = connector_status_disconnected; | ||||
| 
 | ||||
| 	if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi)) | ||||
| 		return status; | ||||
| 
 | ||||
| 	intel_hdmi->has_hdmi_sink = false; | ||||
| 	intel_hdmi->has_audio = false; | ||||
| 	edid = drm_get_edid(connector, | ||||
| @ -633,7 +896,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | ||||
| 	struct intel_encoder *intel_encoder; | ||||
| 	struct intel_connector *intel_connector; | ||||
| 	struct intel_hdmi *intel_hdmi; | ||||
| 	int i; | ||||
| 
 | ||||
| 	intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL); | ||||
| 	if (!intel_hdmi) | ||||
| @ -710,26 +972,19 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | ||||
| 
 | ||||
| 	if (!HAS_PCH_SPLIT(dev)) { | ||||
| 		intel_hdmi->write_infoframe = g4x_write_infoframe; | ||||
| 		I915_WRITE(VIDEO_DIP_CTL, 0); | ||||
| 		intel_hdmi->set_infoframes = g4x_set_infoframes; | ||||
| 	} else if (IS_VALLEYVIEW(dev)) { | ||||
| 		intel_hdmi->write_infoframe = vlv_write_infoframe; | ||||
| 		for_each_pipe(i) | ||||
| 			I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0); | ||||
| 		intel_hdmi->set_infoframes = vlv_set_infoframes; | ||||
| 	} else if (IS_HASWELL(dev)) { | ||||
| 		/* FIXME: Haswell has a new set of DIP frame registers, but we are | ||||
| 		 * just doing the minimal required for HDMI to work at this stage. | ||||
| 		 */ | ||||
| 		intel_hdmi->write_infoframe = hsw_write_infoframe; | ||||
| 		for_each_pipe(i) | ||||
| 			I915_WRITE(HSW_TVIDEO_DIP_CTL(i), 0); | ||||
| 		intel_hdmi->set_infoframes = hsw_set_infoframes; | ||||
| 	} else if (HAS_PCH_IBX(dev)) { | ||||
| 		intel_hdmi->write_infoframe = ibx_write_infoframe; | ||||
| 		for_each_pipe(i) | ||||
| 			I915_WRITE(TVIDEO_DIP_CTL(i), 0); | ||||
| 		intel_hdmi->set_infoframes = ibx_set_infoframes; | ||||
| 	} else { | ||||
| 		intel_hdmi->write_infoframe = cpt_write_infoframe; | ||||
| 		for_each_pipe(i) | ||||
| 			I915_WRITE(TVIDEO_DIP_CTL(i), 0); | ||||
| 		intel_hdmi->set_infoframes = cpt_set_infoframes; | ||||
| 	} | ||||
| 
 | ||||
| 	if (IS_HASWELL(dev)) | ||||
|  | ||||
| @ -71,6 +71,7 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector) | ||||
| static void intel_lvds_enable(struct intel_lvds *intel_lvds) | ||||
| { | ||||
| 	struct drm_device *dev = intel_lvds->base.base.dev; | ||||
| 	struct intel_crtc *intel_crtc = to_intel_crtc(intel_lvds->base.base.crtc); | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	u32 ctl_reg, lvds_reg, stat_reg; | ||||
| 
 | ||||
| @ -107,7 +108,7 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds) | ||||
| 	if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) | ||||
| 		DRM_ERROR("timed out waiting for panel to power on\n"); | ||||
| 
 | ||||
| 	intel_panel_enable_backlight(dev); | ||||
| 	intel_panel_enable_backlight(dev, intel_crtc->pipe); | ||||
| } | ||||
| 
 | ||||
| static void intel_lvds_disable(struct intel_lvds *intel_lvds) | ||||
| @ -228,14 +229,14 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target) | ||||
| } | ||||
| 
 | ||||
| static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | ||||
| 				  struct drm_display_mode *mode, | ||||
| 				  const struct drm_display_mode *mode, | ||||
| 				  struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	struct drm_device *dev = encoder->dev; | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||||
| 	struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | ||||
| 	struct drm_encoder *tmp_encoder; | ||||
| 	struct intel_encoder *tmp_encoder; | ||||
| 	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; | ||||
| 	int pipe; | ||||
| 
 | ||||
| @ -246,8 +247,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | ||||
| 	} | ||||
| 
 | ||||
| 	/* Should never happen!! */ | ||||
| 	list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) { | ||||
| 		if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) { | ||||
| 	for_each_encoder_on_crtc(dev, encoder->crtc, tmp_encoder) { | ||||
| 		if (&tmp_encoder->base != encoder) { | ||||
| 			DRM_ERROR("Can't enable LVDS and another " | ||||
| 			       "encoder on the same pipe\n"); | ||||
| 			return false; | ||||
| @ -408,13 +409,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder) | ||||
| { | ||||
| 	struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | ||||
| 
 | ||||
| 	/* | ||||
| 	 * Prior to Ironlake, we must disable the pipe if we want to adjust | ||||
| 	 * the panel fitter. However at all other times we can just reset | ||||
| 	 * the registers regardless. | ||||
| 	 */ | ||||
| 	if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty) | ||||
| 		intel_lvds_disable(intel_lvds); | ||||
| 	intel_lvds_disable(intel_lvds); | ||||
| } | ||||
| 
 | ||||
| static void intel_lvds_commit(struct drm_encoder *encoder) | ||||
| @ -777,6 +772,14 @@ static const struct dmi_system_id intel_no_lvds[] = { | ||||
| 			DMI_MATCH(DMI_BOARD_NAME, "MS-7469"), | ||||
| 		}, | ||||
| 	}, | ||||
| 	{ | ||||
| 		.callback = intel_no_lvds_dmi_callback, | ||||
| 		.ident = "ZOTAC ZBOXSD-ID12/ID13", | ||||
| 		.matches = { | ||||
| 			DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"), | ||||
| 			DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"), | ||||
| 		}, | ||||
| 	}, | ||||
| 
 | ||||
| 	{ }	/* terminating entry */ | ||||
| }; | ||||
| @ -967,6 +970,8 @@ bool intel_lvds_init(struct drm_device *dev) | ||||
| 	intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); | ||||
| 	if (HAS_PCH_SPLIT(dev)) | ||||
| 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | ||||
| 	else if (IS_GEN4(dev)) | ||||
| 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | ||||
| 	else | ||||
| 		intel_encoder->crtc_mask = (1 << 1); | ||||
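crtc_mask here is the standard DRM possible-CRTCs bitmask, bit n permitting the encoder on pipe n, so the three branches decode as:

	/* PCH split: (1 << 0) | (1 << 1) | (1 << 2) -> pipes A, B and C
	 * gen4:      (1 << 0) | (1 << 1)            -> pipes A and B
	 * older:     (1 << 1)                        -> pipe B only */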
| 
 | ||||
| @ -1074,35 +1079,14 @@ bool intel_lvds_init(struct drm_device *dev) | ||||
| 		goto failed; | ||||
| 
 | ||||
| out: | ||||
| 	/* | ||||
| 	 * Unlock registers and just | ||||
| 	 * leave them unlocked | ||||
| 	 */ | ||||
| 	if (HAS_PCH_SPLIT(dev)) { | ||||
| 		u32 pwm; | ||||
| 
 | ||||
| 		pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0; | ||||
| 
 | ||||
| 		/* make sure PWM is enabled and locked to the LVDS pipe */ | ||||
| 		pwm = I915_READ(BLC_PWM_CPU_CTL2); | ||||
| 		if (pipe == 0 && (pwm & PWM_PIPE_B)) | ||||
| 			I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE); | ||||
| 		if (pipe) | ||||
| 			pwm |= PWM_PIPE_B; | ||||
| 		else | ||||
| 			pwm &= ~PWM_PIPE_B; | ||||
| 		I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE); | ||||
| 
 | ||||
| 		pwm = I915_READ(BLC_PWM_PCH_CTL1); | ||||
| 		pwm |= PWM_PCH_ENABLE; | ||||
| 		I915_WRITE(BLC_PWM_PCH_CTL1, pwm); | ||||
| 		/* | ||||
| 		 * Unlock registers and just | ||||
| 		 * leave them unlocked | ||||
| 		 */ | ||||
| 		I915_WRITE(PCH_PP_CONTROL, | ||||
| 			   I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); | ||||
| 	} else { | ||||
| 		/*
 | ||||
| 		 * Unlock registers and just | ||||
| 		 * leave them unlocked | ||||
| 		 */ | ||||
| 		I915_WRITE(PP_CONTROL, | ||||
| 			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); | ||||
| 	} | ||||
|  | ||||
| @ -32,34 +32,6 @@ | ||||
| #include "intel_drv.h" | ||||
| #include "i915_drv.h" | ||||
| 
 | ||||
| /**
 | ||||
|  * intel_ddc_probe | ||||
|  * | ||||
|  */ | ||||
| bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private; | ||||
| 	u8 out_buf[] = { 0x0, 0x0}; | ||||
| 	u8 buf[2]; | ||||
| 	struct i2c_msg msgs[] = { | ||||
| 		{ | ||||
| 			.addr = DDC_ADDR, | ||||
| 			.flags = 0, | ||||
| 			.len = 1, | ||||
| 			.buf = out_buf, | ||||
| 		}, | ||||
| 		{ | ||||
| 			.addr = DDC_ADDR, | ||||
| 			.flags = I2C_M_RD, | ||||
| 			.len = 1, | ||||
| 			.buf = buf, | ||||
| 		} | ||||
| 	}; | ||||
| 
 | ||||
| 	return i2c_transfer(intel_gmbus_get_adapter(dev_priv, ddc_bus), | ||||
| 			    msgs, 2) == 2; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * intel_ddc_get_modes - get modelist from monitor | ||||
|  * @connector: DRM connector device to use | ||||
|  | ||||
| @ -226,7 +226,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, | ||||
| 	} | ||||
| 	overlay->last_flip_req = request->seqno; | ||||
| 	overlay->flip_tail = tail; | ||||
| 	ret = i915_wait_request(ring, overlay->last_flip_req); | ||||
| 	ret = i915_wait_seqno(ring, overlay->last_flip_req); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 	i915_gem_retire_requests(dev); | ||||
| @ -452,7 +452,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) | ||||
| 	if (overlay->last_flip_req == 0) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	ret = i915_wait_request(ring, overlay->last_flip_req); | ||||
| 	ret = i915_wait_seqno(ring, overlay->last_flip_req); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 	i915_gem_retire_requests(dev); | ||||
|  | ||||
| @ -56,7 +56,7 @@ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, | ||||
| void | ||||
| intel_pch_panel_fitting(struct drm_device *dev, | ||||
| 			int fitting_mode, | ||||
| 			struct drm_display_mode *mode, | ||||
| 			const struct drm_display_mode *mode, | ||||
| 			struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| @ -287,9 +287,24 @@ void intel_panel_disable_backlight(struct drm_device *dev) | ||||
| 
 | ||||
| 	dev_priv->backlight_enabled = false; | ||||
| 	intel_panel_actually_set_backlight(dev, 0); | ||||
| 
 | ||||
| 	if (INTEL_INFO(dev)->gen >= 4) { | ||||
| 		uint32_t reg, tmp; | ||||
| 
 | ||||
| 		reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2; | ||||
| 
 | ||||
| 		I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE); | ||||
| 
 | ||||
| 		if (HAS_PCH_SPLIT(dev)) { | ||||
| 			tmp = I915_READ(BLC_PWM_PCH_CTL1); | ||||
| 			tmp &= ~BLM_PCH_PWM_ENABLE; | ||||
| 			I915_WRITE(BLC_PWM_PCH_CTL1, tmp); | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| void intel_panel_enable_backlight(struct drm_device *dev) | ||||
| void intel_panel_enable_backlight(struct drm_device *dev, | ||||
| 				  enum pipe pipe) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 
 | ||||
| @ -298,6 +313,40 @@ void intel_panel_enable_backlight(struct drm_device *dev) | ||||
| 
 | ||||
| 	dev_priv->backlight_enabled = true; | ||||
| 	intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); | ||||
| 
 | ||||
| 	if (INTEL_INFO(dev)->gen >= 4) { | ||||
| 		uint32_t reg, tmp; | ||||
| 
 | ||||
| 		reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2; | ||||
| 
 | ||||
| 
 | ||||
| 		tmp = I915_READ(reg); | ||||
| 
 | ||||
| 		/* Note that this can also get called through dpms changes. And
 | ||||
| 		 * we don't track the backlight dpms state, hence check whether | ||||
| 		 * we have to do anything first. */ | ||||
| 		if (tmp & BLM_PWM_ENABLE) | ||||
| 			return; | ||||
| 
 | ||||
| 		if (dev_priv->num_pipe == 3) | ||||
| 			tmp &= ~BLM_PIPE_SELECT_IVB; | ||||
| 		else | ||||
| 			tmp &= ~BLM_PIPE_SELECT; | ||||
| 
 | ||||
| 		tmp |= BLM_PIPE(pipe); | ||||
| 		tmp &= ~BLM_PWM_ENABLE; | ||||
| 
 | ||||
| 		I915_WRITE(reg, tmp); | ||||
| 		POSTING_READ(reg); | ||||
| 		I915_WRITE(reg, tmp | BLM_PWM_ENABLE); | ||||
| 
 | ||||
| 		if (HAS_PCH_SPLIT(dev)) { | ||||
| 			tmp = I915_READ(BLC_PWM_PCH_CTL1); | ||||
| 			tmp |= BLM_PCH_PWM_ENABLE; | ||||
| 			tmp &= ~BLM_PCH_OVERRIDE_ENABLE; | ||||
| 			I915_WRITE(BLC_PWM_PCH_CTL1, tmp); | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static void intel_panel_init_backlight(struct drm_device *dev) | ||||
|  | ||||
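
intel_panel_enable_backlight() now takes the pipe explicitly so the PWM can be locked to whichever pipe actually drives the panel. A hedged sketch of a call site; the function name and surrounding sequence are illustrative, not from this commit:

	static void example_panel_on(struct drm_device *dev,
				     struct intel_crtc *intel_crtc)
	{
		/* ... pipe/port enable sequence elided ... */

		/* Route the backlight PWM to the pipe now driving the panel. */
		intel_panel_enable_backlight(dev, intel_crtc->pipe);
	}
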
| @ -387,8 +387,6 @@ void intel_update_fbc(struct drm_device *dev) | ||||
| 	struct drm_i915_gem_object *obj; | ||||
| 	int enable_fbc; | ||||
| 
 | ||||
| 	DRM_DEBUG_KMS("\n"); | ||||
| 
 | ||||
| 	if (!i915_powersave) | ||||
| 		return; | ||||
| 
 | ||||
| @ -405,7 +403,9 @@ void intel_update_fbc(struct drm_device *dev) | ||||
| 	 *   - going to an unsupported config (interlace, pixel multiply, etc.) | ||||
| 	 */ | ||||
| 	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { | ||||
| 		if (tmp_crtc->enabled && tmp_crtc->fb) { | ||||
| 		if (tmp_crtc->enabled && | ||||
| 		    !to_intel_crtc(tmp_crtc)->primary_disabled && | ||||
| 		    tmp_crtc->fb) { | ||||
| 			if (crtc) { | ||||
| 				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); | ||||
| 				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; | ||||
| @ -2182,7 +2182,7 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val) | ||||
| 	return true; | ||||
| } | ||||
| 
 | ||||
| void ironlake_enable_drps(struct drm_device *dev) | ||||
| static void ironlake_enable_drps(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	u32 rgvmodectl = I915_READ(MEMMODECTL); | ||||
| @ -2246,7 +2246,7 @@ void ironlake_enable_drps(struct drm_device *dev) | ||||
| 	getrawmonotonic(&dev_priv->last_time2); | ||||
| } | ||||
| 
 | ||||
| void ironlake_disable_drps(struct drm_device *dev) | ||||
| static void ironlake_disable_drps(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	u16 rgvswctl = I915_READ16(MEMSWCTL); | ||||
| @ -2299,10 +2299,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val) | ||||
| 	dev_priv->cur_delay = val; | ||||
| } | ||||
| 
 | ||||
| void gen6_disable_rps(struct drm_device *dev) | ||||
| static void gen6_disable_rps(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 
 | ||||
| 	I915_WRITE(GEN6_RC_CONTROL, 0); | ||||
| 	I915_WRITE(GEN6_RPNSWREQ, 1 << 31); | ||||
| 	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); | ||||
| 	I915_WRITE(GEN6_PMIER, 0); | ||||
| @ -2332,9 +2333,11 @@ int intel_enable_rc6(const struct drm_device *dev) | ||||
| 	if (INTEL_INFO(dev)->gen == 5) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	/* Sorry Haswell, no RC6 for you for now. */ | ||||
| 	/* On Haswell, only RC6 is available. So let's enable it by default to
 | ||||
| 	 * provide better testing and coverage since the beginning. | ||||
| 	 */ | ||||
| 	if (IS_HASWELL(dev)) | ||||
| 		return 0; | ||||
| 		return INTEL_RC6_ENABLE; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Disable rc6 on Sandybridge | ||||
| @ -2347,8 +2350,9 @@ int intel_enable_rc6(const struct drm_device *dev) | ||||
| 	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); | ||||
| } | ||||
| 
 | ||||
| void gen6_enable_rps(struct drm_i915_private *dev_priv) | ||||
| static void gen6_enable_rps(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	struct intel_ring_buffer *ring; | ||||
| 	u32 rp_state_cap; | ||||
| 	u32 gt_perf_status; | ||||
| @ -2357,6 +2361,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | ||||
| 	int rc6_mode; | ||||
| 	int i; | ||||
| 
 | ||||
| 	WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||||
| 
 | ||||
| 	/* Here begins a magic sequence of register writes to enable
 | ||||
| 	 * auto-downclocking. | ||||
| 	 * | ||||
| @ -2364,7 +2370,6 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | ||||
| 	 * userspace... | ||||
| 	 */ | ||||
| 	I915_WRITE(GEN6_RC_STATE, 0); | ||||
| 	mutex_lock(&dev_priv->dev->struct_mutex); | ||||
| 
 | ||||
| 	/* Clear the DBG now so we don't confuse earlier errors */ | ||||
| 	if ((gtfifodbg = I915_READ(GTFIFODBG))) { | ||||
| @ -2400,20 +2405,24 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | ||||
| 	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); | ||||
| 	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ | ||||
| 
 | ||||
| 	/* Check if we are enabling RC6 */ | ||||
| 	rc6_mode = intel_enable_rc6(dev_priv->dev); | ||||
| 	if (rc6_mode & INTEL_RC6_ENABLE) | ||||
| 		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; | ||||
| 
 | ||||
| 	if (rc6_mode & INTEL_RC6p_ENABLE) | ||||
| 		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; | ||||
| 	/* We don't use those on Haswell */ | ||||
| 	if (!IS_HASWELL(dev)) { | ||||
| 		if (rc6_mode & INTEL_RC6p_ENABLE) | ||||
| 			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; | ||||
| 
 | ||||
| 	if (rc6_mode & INTEL_RC6pp_ENABLE) | ||||
| 		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; | ||||
| 		if (rc6_mode & INTEL_RC6pp_ENABLE) | ||||
| 			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; | ||||
| 	} | ||||
| 
 | ||||
| 	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", | ||||
| 			(rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off", | ||||
| 			(rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off", | ||||
| 			(rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off"); | ||||
| 			(rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", | ||||
| 			(rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", | ||||
| 			(rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); | ||||
| 
 | ||||
| 	I915_WRITE(GEN6_RC_CONTROL, | ||||
| 		   rc6_mask | | ||||
| @ -2431,10 +2440,19 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | ||||
| 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, | ||||
| 		   dev_priv->max_delay << 24 | | ||||
| 		   dev_priv->min_delay << 16); | ||||
| 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); | ||||
| 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); | ||||
| 	I915_WRITE(GEN6_RP_UP_EI, 100000); | ||||
| 	I915_WRITE(GEN6_RP_DOWN_EI, 5000000); | ||||
| 
 | ||||
| 	if (IS_HASWELL(dev)) { | ||||
| 		I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); | ||||
| 		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); | ||||
| 		I915_WRITE(GEN6_RP_UP_EI, 66000); | ||||
| 		I915_WRITE(GEN6_RP_DOWN_EI, 350000); | ||||
| 	} else { | ||||
| 		I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); | ||||
| 		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); | ||||
| 		I915_WRITE(GEN6_RP_UP_EI, 100000); | ||||
| 		I915_WRITE(GEN6_RP_DOWN_EI, 5000000); | ||||
| 	} | ||||
| 
 | ||||
| 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); | ||||
| 	I915_WRITE(GEN6_RP_CONTROL, | ||||
| 		   GEN6_RP_MEDIA_TURBO | | ||||
| @ -2442,7 +2460,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | ||||
| 		   GEN6_RP_MEDIA_IS_GFX | | ||||
| 		   GEN6_RP_ENABLE | | ||||
| 		   GEN6_RP_UP_BUSY_AVG | | ||||
| 		   GEN6_RP_DOWN_IDLE_CONT); | ||||
| 		   (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT)); | ||||
| 
 | ||||
| 	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||||
| 		     500)) | ||||
| @ -2473,14 +2491,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | ||||
| 	gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); | ||||
| 
 | ||||
| 	/* requires MSI enabled */ | ||||
| 	I915_WRITE(GEN6_PMIER, | ||||
| 		   GEN6_PM_MBOX_EVENT | | ||||
| 		   GEN6_PM_THERMAL_EVENT | | ||||
| 		   GEN6_PM_RP_DOWN_TIMEOUT | | ||||
| 		   GEN6_PM_RP_UP_THRESHOLD | | ||||
| 		   GEN6_PM_RP_DOWN_THRESHOLD | | ||||
| 		   GEN6_PM_RP_UP_EI_EXPIRED | | ||||
| 		   GEN6_PM_RP_DOWN_EI_EXPIRED); | ||||
| 	I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS); | ||||
| 	spin_lock_irq(&dev_priv->rps_lock); | ||||
| 	WARN_ON(dev_priv->pm_iir != 0); | ||||
| 	I915_WRITE(GEN6_PMIMR, 0); | ||||
| @ -2489,15 +2500,17 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | ||||
| 	I915_WRITE(GEN6_PMINTRMSK, 0); | ||||
| 
 | ||||
| 	gen6_gt_force_wake_put(dev_priv); | ||||
| 	mutex_unlock(&dev_priv->dev->struct_mutex); | ||||
| } | ||||
| 
 | ||||
| void gen6_update_ring_freq(struct drm_i915_private *dev_priv) | ||||
| static void gen6_update_ring_freq(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	int min_freq = 15; | ||||
| 	int gpu_freq, ia_freq, max_ia_freq; | ||||
| 	int scaling_factor = 180; | ||||
| 
 | ||||
| 	WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||||
| 
 | ||||
| 	max_ia_freq = cpufreq_quick_get_max(0); | ||||
| 	/*
 | ||||
| 	 * Default to measured freq if none found, PCU will ensure we don't go | ||||
| @ -2509,8 +2522,6 @@ void gen6_update_ring_freq(struct drm_i915_private *dev_priv) | ||||
| 	/* Convert from kHz to MHz */ | ||||
| 	max_ia_freq /= 1000; | ||||
| 
 | ||||
| 	mutex_lock(&dev_priv->dev->struct_mutex); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * For each potential GPU frequency, load a ring frequency we'd like | ||||
| 	 * to use for memory access.  We do this by specifying the IA frequency | ||||
| @ -2541,11 +2552,9 @@ void gen6_update_ring_freq(struct drm_i915_private *dev_priv) | ||||
| 			continue; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	mutex_unlock(&dev_priv->dev->struct_mutex); | ||||
| } | ||||
| 
 | ||||
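
gen6_update_ring_freq() above walks the GPU frequency range and, for each step, tells the PCU which IA/ring frequency to pair with it. The exact pairing formula is internal to the function and elided by the hunk; as a rough standalone illustration of the idea only (this is an assumption, not the driver's formula):

	/* Illustrative only: derive a ring-frequency hint from a GPU
	 * frequency, clamped to a floor, in the spirit of the loop above.
	 */
	static int example_ring_freq_hint(int gpu_freq, int min_freq,
					  int scaling_factor)
	{
		int ia_freq = (gpu_freq * scaling_factor) / 100;

		return ia_freq < min_freq ? min_freq : ia_freq;
	}
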
| static void ironlake_teardown_rc6(struct drm_device *dev) | ||||
| void ironlake_teardown_rc6(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 
 | ||||
| @ -2562,7 +2571,7 @@ static void ironlake_teardown_rc6(struct drm_device *dev) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| void ironlake_disable_rc6(struct drm_device *dev) | ||||
| static void ironlake_disable_rc6(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 
 | ||||
| @ -2578,8 +2587,6 @@ void ironlake_disable_rc6(struct drm_device *dev) | ||||
| 		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | ||||
| 		POSTING_READ(RSTDBYCTL); | ||||
| 	} | ||||
| 
 | ||||
| 	ironlake_teardown_rc6(dev); | ||||
| } | ||||
| 
 | ||||
| static int ironlake_setup_rc6(struct drm_device *dev) | ||||
| @ -2601,7 +2608,7 @@ static int ironlake_setup_rc6(struct drm_device *dev) | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| void ironlake_enable_rc6(struct drm_device *dev) | ||||
| static void ironlake_enable_rc6(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; | ||||
| @ -2613,12 +2620,11 @@ void ironlake_enable_rc6(struct drm_device *dev) | ||||
| 	if (!intel_enable_rc6(dev)) | ||||
| 		return; | ||||
| 
 | ||||
| 	mutex_lock(&dev->struct_mutex); | ||||
| 	WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||||
| 
 | ||||
| 	ret = ironlake_setup_rc6(dev); | ||||
| 	if (ret) { | ||||
| 		mutex_unlock(&dev->struct_mutex); | ||||
| 	if (ret) | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * GPU can automatically power down the render unit if given a page | ||||
| @ -2627,7 +2633,6 @@ void ironlake_enable_rc6(struct drm_device *dev) | ||||
| 	ret = intel_ring_begin(ring, 6); | ||||
| 	if (ret) { | ||||
| 		ironlake_teardown_rc6(dev); | ||||
| 		mutex_unlock(&dev->struct_mutex); | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| @ -2652,13 +2657,11 @@ void ironlake_enable_rc6(struct drm_device *dev) | ||||
| 	if (ret) { | ||||
| 		DRM_ERROR("failed to enable ironlake power savings\n"); | ||||
| 		ironlake_teardown_rc6(dev); | ||||
| 		mutex_unlock(&dev->struct_mutex); | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); | ||||
| 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | ||||
| 	mutex_unlock(&dev->struct_mutex); | ||||
| } | ||||
| 
 | ||||
| static unsigned long intel_pxfreq(u32 vidfreq) | ||||
| @ -3154,8 +3157,7 @@ void intel_gpu_ips_teardown(void) | ||||
| 	i915_mch_dev = NULL; | ||||
| 	spin_unlock(&mchdev_lock); | ||||
| } | ||||
| 
 | ||||
| void intel_init_emon(struct drm_device *dev) | ||||
| static void intel_init_emon(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	u32 lcfuse; | ||||
| @ -3226,6 +3228,28 @@ void intel_init_emon(struct drm_device *dev) | ||||
| 	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); | ||||
| } | ||||
| 
 | ||||
| void intel_disable_gt_powersave(struct drm_device *dev) | ||||
| { | ||||
| 	if (IS_IRONLAKE_M(dev)) { | ||||
| 		ironlake_disable_drps(dev); | ||||
| 		ironlake_disable_rc6(dev); | ||||
| 	} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) { | ||||
| 		gen6_disable_rps(dev); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| void intel_enable_gt_powersave(struct drm_device *dev) | ||||
| { | ||||
| 	if (IS_IRONLAKE_M(dev)) { | ||||
| 		ironlake_enable_drps(dev); | ||||
| 		ironlake_enable_rc6(dev); | ||||
| 		intel_init_emon(dev); | ||||
| 	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { | ||||
| 		gen6_enable_rps(dev); | ||||
| 		gen6_update_ring_freq(dev); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
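
With intel_enable_gt_powersave()/intel_disable_gt_powersave() as the single entry points, callers no longer pick between DRPS, RC6 and RPS themselves; note that the gen6 paths now assert struct_mutex rather than taking it. A minimal sketch of a hypothetical caller (the function name is assumed):

	/* Hypothetical resume-time caller: the wrapper selects the per-gen
	 * path, and the caller must hold struct_mutex, since
	 * gen6_enable_rps() now WARNs if it is not locked.
	 */
	static void example_gt_resume(struct drm_device *dev)
	{
		mutex_lock(&dev->struct_mutex);
		intel_enable_gt_powersave(dev);
		mutex_unlock(&dev->struct_mutex);
	}
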
| static void ironlake_init_clock_gating(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| @ -3328,8 +3352,12 @@ static void gen6_init_clock_gating(struct drm_device *dev) | ||||
| 	 * | ||||
| 	 * According to the spec, bit 11 (RCCUNIT) must also be set, | ||||
| 	 * but we didn't debug actual testcases to find it out. | ||||
| 	 * | ||||
| 	 * Also apply WaDisableVDSUnitClockGating and | ||||
| 	 * WaDisableRCPBUnitClockGating. | ||||
| 	 */ | ||||
| 	I915_WRITE(GEN6_UCGCTL2, | ||||
| 		   GEN7_VDSUNIT_CLOCK_GATE_DISABLE | | ||||
| 		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | | ||||
| 		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE); | ||||
| 
 | ||||
| @ -3357,6 +3385,9 @@ static void gen6_init_clock_gating(struct drm_device *dev) | ||||
| 		   ILK_DPARB_CLK_GATE  | | ||||
| 		   ILK_DPFD_CLK_GATE); | ||||
| 
 | ||||
| 	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | | ||||
| 		   GEN6_MBCTL_ENABLE_BOOT_FETCH); | ||||
| 
 | ||||
| 	for_each_pipe(pipe) { | ||||
| 		I915_WRITE(DSPCNTR(pipe), | ||||
| 			   I915_READ(DSPCNTR(pipe)) | | ||||
| @ -3377,7 +3408,7 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) | ||||
| 	I915_WRITE(GEN7_FF_THREAD_MODE, reg); | ||||
| } | ||||
| 
 | ||||
| static void ivybridge_init_clock_gating(struct drm_device *dev) | ||||
| static void haswell_init_clock_gating(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	int pipe; | ||||
| @ -3427,6 +3458,89 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) | ||||
| 	/* WaDisable4x2SubspanOptimization */ | ||||
| 	I915_WRITE(CACHE_MODE_1, | ||||
| 		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); | ||||
| 
 | ||||
| 	/* XXX: This is a workaround for early silicon revisions and should be
 | ||||
| 	 * removed later. | ||||
| 	 */ | ||||
| 	I915_WRITE(WM_DBG, | ||||
| 			I915_READ(WM_DBG) | | ||||
| 			WM_DBG_DISALLOW_MULTIPLE_LP | | ||||
| 			WM_DBG_DISALLOW_SPRITE | | ||||
| 			WM_DBG_DISALLOW_MAXFIFO); | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| static void ivybridge_init_clock_gating(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	int pipe; | ||||
| 	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | ||||
| 	uint32_t snpcr; | ||||
| 
 | ||||
| 	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | ||||
| 
 | ||||
| 	I915_WRITE(WM3_LP_ILK, 0); | ||||
| 	I915_WRITE(WM2_LP_ILK, 0); | ||||
| 	I915_WRITE(WM1_LP_ILK, 0); | ||||
| 
 | ||||
| 	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); | ||||
| 
 | ||||
| 	I915_WRITE(IVB_CHICKEN3, | ||||
| 		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | | ||||
| 		   CHICKEN3_DGMG_DONE_FIX_DISABLE); | ||||
| 
 | ||||
| 	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ | ||||
| 	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, | ||||
| 		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); | ||||
| 
 | ||||
| 	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ | ||||
| 	I915_WRITE(GEN7_L3CNTLREG1, | ||||
| 			GEN7_WA_FOR_GEN7_L3_CONTROL); | ||||
| 	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, | ||||
| 			GEN7_WA_L3_CHICKEN_MODE); | ||||
| 
 | ||||
| 	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
 | ||||
| 	 * gating disable must be set.  Failure to set it results in | ||||
| 	 * flickering pixels due to Z write ordering failures after | ||||
| 	 * some amount of runtime in the Mesa "fire" demo, and Unigine | ||||
| 	 * Sanctuary and Tropics, and apparently anything else with | ||||
| 	 * alpha test or pixel discard. | ||||
| 	 * | ||||
| 	 * According to the spec, bit 11 (RCCUNIT) must also be set, | ||||
| 	 * but we didn't debug actual testcases to find it out. | ||||
| 	 * | ||||
| 	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. | ||||
| 	 * This implements the WaDisableRCZUnitClockGating workaround. | ||||
| 	 */ | ||||
| 	I915_WRITE(GEN6_UCGCTL2, | ||||
| 		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE | | ||||
| 		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE); | ||||
| 
 | ||||
| 	/* This is required by WaCatErrorRejectionIssue */ | ||||
| 	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, | ||||
| 			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | | ||||
| 			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); | ||||
| 
 | ||||
| 	for_each_pipe(pipe) { | ||||
| 		I915_WRITE(DSPCNTR(pipe), | ||||
| 			   I915_READ(DSPCNTR(pipe)) | | ||||
| 			   DISPPLANE_TRICKLE_FEED_DISABLE); | ||||
| 		intel_flush_display_plane(dev_priv, pipe); | ||||
| 	} | ||||
| 
 | ||||
| 	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | | ||||
| 		   GEN6_MBCTL_ENABLE_BOOT_FETCH); | ||||
| 
 | ||||
| 	gen7_setup_fixed_func_scheduler(dev_priv); | ||||
| 
 | ||||
| 	/* WaDisable4x2SubspanOptimization */ | ||||
| 	I915_WRITE(CACHE_MODE_1, | ||||
| 		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); | ||||
| 
 | ||||
| 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); | ||||
| 	snpcr &= ~GEN6_MBC_SNPCR_MASK; | ||||
| 	snpcr |= GEN6_MBC_SNPCR_MED; | ||||
| 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); | ||||
| } | ||||
| 
 | ||||
| static void valleyview_init_clock_gating(struct drm_device *dev) | ||||
| @ -3441,11 +3555,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev) | ||||
| 	I915_WRITE(WM2_LP_ILK, 0); | ||||
| 	I915_WRITE(WM1_LP_ILK, 0); | ||||
| 
 | ||||
| 	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
 | ||||
| 	 * This implements the WaDisableRCZUnitClockGating workaround. | ||||
| 	 */ | ||||
| 	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); | ||||
| 
 | ||||
| 	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); | ||||
| 
 | ||||
| 	I915_WRITE(IVB_CHICKEN3, | ||||
| @ -3465,6 +3574,35 @@ static void valleyview_init_clock_gating(struct drm_device *dev) | ||||
| 		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | | ||||
| 		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); | ||||
| 
 | ||||
| 	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | | ||||
| 		   GEN6_MBCTL_ENABLE_BOOT_FETCH); | ||||
| 
 | ||||
| 
 | ||||
| 	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
 | ||||
| 	 * gating disable must be set.  Failure to set it results in | ||||
| 	 * flickering pixels due to Z write ordering failures after | ||||
| 	 * some amount of runtime in the Mesa "fire" demo, and Unigine | ||||
| 	 * Sanctuary and Tropics, and apparently anything else with | ||||
| 	 * alpha test or pixel discard. | ||||
| 	 * | ||||
| 	 * According to the spec, bit 11 (RCCUNIT) must also be set, | ||||
| 	 * but we didn't debug actual testcases to find it out. | ||||
| 	 * | ||||
| 	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. | ||||
| 	 * This implements the WaDisableRCZUnitClockGating workaround. | ||||
| 	 * | ||||
| 	 * Also apply WaDisableVDSUnitClockGating and | ||||
| 	 * WaDisableRCPBUnitClockGating. | ||||
| 	 */ | ||||
| 	I915_WRITE(GEN6_UCGCTL2, | ||||
| 		   GEN7_VDSUNIT_CLOCK_GATE_DISABLE | | ||||
| 		   GEN7_TDLUNIT_CLOCK_GATE_DISABLE | | ||||
| 		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE | | ||||
| 		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | | ||||
| 		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE); | ||||
| 
 | ||||
| 	I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE); | ||||
| 
 | ||||
| 	for_each_pipe(pipe) { | ||||
| 		I915_WRITE(DSPCNTR(pipe), | ||||
| 			   I915_READ(DSPCNTR(pipe)) | | ||||
| @ -3474,6 +3612,19 @@ static void valleyview_init_clock_gating(struct drm_device *dev) | ||||
| 
 | ||||
| 	I915_WRITE(CACHE_MODE_1, | ||||
| 		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * On ValleyView, the GUnit needs to signal the GT | ||||
| 	 * when flip and other events complete.  So enable | ||||
| 	 * all the GUnit->GT interrupts here | ||||
| 	 */ | ||||
| 	I915_WRITE(VLV_DPFLIPSTAT, PIPEB_LINE_COMPARE_INT_EN | | ||||
| 		   PIPEB_HLINE_INT_EN | PIPEB_VBLANK_INT_EN | | ||||
| 		   SPRITED_FLIPDONE_INT_EN | SPRITEC_FLIPDONE_INT_EN | | ||||
| 		   PLANEB_FLIPDONE_INT_EN | PIPEA_LINE_COMPARE_INT_EN | | ||||
| 		   PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN | | ||||
| 		   SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN | | ||||
| 		   PLANEA_FLIPDONE_INT_EN); | ||||
| } | ||||
| 
 | ||||
| static void g4x_init_clock_gating(struct drm_device *dev) | ||||
| @ -3681,34 +3832,6 @@ void intel_init_pm(struct drm_device *dev) | ||||
| 
 | ||||
| 	/* For FIFO watermark updates */ | ||||
| 	if (HAS_PCH_SPLIT(dev)) { | ||||
| 		dev_priv->display.force_wake_get = __gen6_gt_force_wake_get; | ||||
| 		dev_priv->display.force_wake_put = __gen6_gt_force_wake_put; | ||||
| 
 | ||||
| 		/* IVB configs may use multi-threaded forcewake */ | ||||
| 		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { | ||||
| 			u32	ecobus; | ||||
| 
 | ||||
| 			/* A small trick here - if the bios hasn't configured MT forcewake,
 | ||||
| 			 * and if the device is in RC6, then force_wake_mt_get will not wake | ||||
| 			 * the device and the ECOBUS read will return zero. Which will be | ||||
| 			 * (correctly) interpreted by the test below as MT forcewake being | ||||
| 			 * disabled. | ||||
| 			 */ | ||||
| 			mutex_lock(&dev->struct_mutex); | ||||
| 			__gen6_gt_force_wake_mt_get(dev_priv); | ||||
| 			ecobus = I915_READ_NOTRACE(ECOBUS); | ||||
| 			__gen6_gt_force_wake_mt_put(dev_priv); | ||||
| 			mutex_unlock(&dev->struct_mutex); | ||||
| 
 | ||||
| 			if (ecobus & FORCEWAKE_MT_ENABLE) { | ||||
| 				DRM_DEBUG_KMS("Using MT version of forcewake\n"); | ||||
| 				dev_priv->display.force_wake_get = | ||||
| 					__gen6_gt_force_wake_mt_get; | ||||
| 				dev_priv->display.force_wake_put = | ||||
| 					__gen6_gt_force_wake_mt_put; | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		if (HAS_PCH_IBX(dev)) | ||||
| 			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; | ||||
| 		else if (HAS_PCH_CPT(dev)) | ||||
| @ -3756,7 +3879,7 @@ void intel_init_pm(struct drm_device *dev) | ||||
| 					      "Disable CxSR\n"); | ||||
| 				dev_priv->display.update_wm = NULL; | ||||
| 			} | ||||
| 			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; | ||||
| 			dev_priv->display.init_clock_gating = haswell_init_clock_gating; | ||||
| 			dev_priv->display.sanitize_pm = gen6_sanitize_pm; | ||||
| 		} else | ||||
| 			dev_priv->display.update_wm = NULL; | ||||
| @ -3764,8 +3887,6 @@ void intel_init_pm(struct drm_device *dev) | ||||
| 		dev_priv->display.update_wm = valleyview_update_wm; | ||||
| 		dev_priv->display.init_clock_gating = | ||||
| 			valleyview_init_clock_gating; | ||||
| 		dev_priv->display.force_wake_get = vlv_force_wake_get; | ||||
| 		dev_priv->display.force_wake_put = vlv_force_wake_put; | ||||
| 	} else if (IS_PINEVIEW(dev)) { | ||||
| 		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), | ||||
| 					    dev_priv->is_ddr3, | ||||
| @ -3811,10 +3932,196 @@ void intel_init_pm(struct drm_device *dev) | ||||
| 		else | ||||
| 			dev_priv->display.get_fifo_size = i830_get_fifo_size; | ||||
| 	} | ||||
| 
 | ||||
| 	/* We attempt to init the necessary power wells early in the initialization
 | ||||
| 	 * time, so the subsystems that expect power to be enabled can work. | ||||
| 	 */ | ||||
| 	intel_init_power_wells(dev); | ||||
| } | ||||
| 
 | ||||
| static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	u32 gt_thread_status_mask; | ||||
| 
 | ||||
| 	if (IS_HASWELL(dev_priv->dev)) | ||||
| 		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW; | ||||
| 	else | ||||
| 		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK; | ||||
| 
 | ||||
| 	/* w/a for a sporadic read returning 0 by waiting for the GT
 | ||||
| 	 * thread to wake up. | ||||
| 	 */ | ||||
| 	if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500)) | ||||
| 		DRM_ERROR("GT thread status wait timed out\n"); | ||||
| } | ||||
| 
 | ||||
| static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	u32 forcewake_ack; | ||||
| 
 | ||||
| 	if (IS_HASWELL(dev_priv->dev)) | ||||
| 		forcewake_ack = FORCEWAKE_ACK_HSW; | ||||
| 	else | ||||
| 		forcewake_ack = FORCEWAKE_ACK; | ||||
| 
 | ||||
| 	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500)) | ||||
| 		DRM_ERROR("Force wake wait timed out\n"); | ||||
| 
 | ||||
| 	I915_WRITE_NOTRACE(FORCEWAKE, 1); | ||||
| 
 | ||||
| 	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) | ||||
| 		DRM_ERROR("Force wake wait timed out\n"); | ||||
| 
 | ||||
| 	__gen6_gt_wait_for_thread_c0(dev_priv); | ||||
| } | ||||
| 
 | ||||
| static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	u32 forcewake_ack; | ||||
| 
 | ||||
| 	if (IS_HASWELL(dev_priv->dev)) | ||||
| 		forcewake_ack = FORCEWAKE_ACK_HSW; | ||||
| 	else | ||||
| 		forcewake_ack = FORCEWAKE_MT_ACK; | ||||
| 
 | ||||
| 	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500)) | ||||
| 		DRM_ERROR("Force wake wait timed out\n"); | ||||
| 
 | ||||
| 	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); | ||||
| 
 | ||||
| 	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) | ||||
| 		DRM_ERROR("Force wake wait timed out\n"); | ||||
| 
 | ||||
| 	__gen6_gt_wait_for_thread_c0(dev_priv); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Generally this is called implicitly by the register read function. However, | ||||
|  * if some sequence requires the GT to not power down then this function should | ||||
|  * be called at the beginning of the sequence followed by a call to | ||||
|  * gen6_gt_force_wake_put() at the end of the sequence. | ||||
|  */ | ||||
| void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	unsigned long irqflags; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&dev_priv->gt_lock, irqflags); | ||||
| 	if (dev_priv->forcewake_count++ == 0) | ||||
| 		dev_priv->gt.force_wake_get(dev_priv); | ||||
| 	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); | ||||
| } | ||||
| 
 | ||||
| void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	u32 gtfifodbg; | ||||
| 	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG); | ||||
| 	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK, | ||||
| 	     "MMIO read or write has been dropped %x\n", gtfifodbg)) | ||||
| 		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); | ||||
| } | ||||
| 
 | ||||
| static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	I915_WRITE_NOTRACE(FORCEWAKE, 0); | ||||
| 	/* The below doubles as a POSTING_READ */ | ||||
| 	gen6_gt_check_fifodbg(dev_priv); | ||||
| } | ||||
| 
 | ||||
| static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); | ||||
| 	/* The below doubles as a POSTING_READ */ | ||||
| 	gen6_gt_check_fifodbg(dev_priv); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * see gen6_gt_force_wake_get() | ||||
|  */ | ||||
| void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	unsigned long irqflags; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&dev_priv->gt_lock, irqflags); | ||||
| 	if (--dev_priv->forcewake_count == 0) | ||||
| 		dev_priv->gt.force_wake_put(dev_priv); | ||||
| 	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); | ||||
| } | ||||
| 
 | ||||
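
As the comment above gen6_gt_force_wake_get() notes, any sequence that must keep the GT powered brackets its register accesses with the get/put pair; the reference count under gt_lock makes nesting safe. A minimal usage sketch (the register choice is illustrative):

	/* Keep the GT awake across a multi-register read sequence. */
	static u32 example_read_pair(struct drm_i915_private *dev_priv)
	{
		u32 rp, rc;

		gen6_gt_force_wake_get(dev_priv);
		rp = I915_READ(GEN6_RP_CONTROL);
		rc = I915_READ(GEN6_RC_CONTROL);
		gen6_gt_force_wake_put(dev_priv);

		return rp | rc;
	}
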
| int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	int ret = 0; | ||||
| 
 | ||||
| 	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { | ||||
| 		int loop = 500; | ||||
| 		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||||
| 		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { | ||||
| 			udelay(10); | ||||
| 			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||||
| 		} | ||||
| 		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) | ||||
| 			++ret; | ||||
| 		dev_priv->gt_fifo_count = fifo; | ||||
| 	} | ||||
| 	dev_priv->gt_fifo_count--; | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
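
__gen6_gt_wait_for_fifo() reserves a GT FIFO slot before a posted write, returning nonzero if the poll gave up. A hedged sketch of how a write path might use it; the helper below is hypothetical, the real caller being the driver's MMIO write machinery:

	/* Hypothetical write helper: reserve a FIFO entry, then post. */
	static void example_gen6_write(struct drm_i915_private *dev_priv,
				       u32 reg, u32 val)
	{
		if (__gen6_gt_wait_for_fifo(dev_priv))
			DRM_ERROR("GT FIFO exhausted before write to %#x\n",
				  reg);

		I915_WRITE_NOTRACE(reg, val);
	}
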
| static void vlv_force_wake_get(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	/* Already awake? */ | ||||
| 	if ((I915_READ(0x130094) & 0xa1) == 0xa1) | ||||
| 		return; | ||||
| 
 | ||||
| 	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff); | ||||
| 	POSTING_READ(FORCEWAKE_VLV); | ||||
| 
 | ||||
| 	if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500)) | ||||
| 		DRM_ERROR("Force wake wait timed out\n"); | ||||
| 
 | ||||
| 	__gen6_gt_wait_for_thread_c0(dev_priv); | ||||
| } | ||||
| 
 | ||||
| static void vlv_force_wake_put(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000); | ||||
| 	/* FIXME: confirm VLV behavior with Punit folks */ | ||||
| 	POSTING_READ(FORCEWAKE_VLV); | ||||
| } | ||||
| 
 | ||||
| void intel_gt_init(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 
 | ||||
| 	spin_lock_init(&dev_priv->gt_lock); | ||||
| 
 | ||||
| 	if (IS_VALLEYVIEW(dev)) { | ||||
| 		dev_priv->gt.force_wake_get = vlv_force_wake_get; | ||||
| 		dev_priv->gt.force_wake_put = vlv_force_wake_put; | ||||
| 	} else if (INTEL_INFO(dev)->gen >= 6) { | ||||
| 		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; | ||||
| 		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; | ||||
| 
 | ||||
| 		/* IVB configs may use multi-threaded forcewake */ | ||||
| 		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { | ||||
| 			u32 ecobus; | ||||
| 
 | ||||
| 			/* A small trick here - if the bios hasn't configured
 | ||||
| 			 * MT forcewake, and if the device is in RC6, then | ||||
| 			 * force_wake_mt_get will not wake the device and the | ||||
| 			 * ECOBUS read will return zero. Which will be | ||||
| 			 * (correctly) interpreted by the test below as MT | ||||
| 			 * forcewake being disabled. | ||||
| 			 */ | ||||
| 			mutex_lock(&dev->struct_mutex); | ||||
| 			__gen6_gt_force_wake_mt_get(dev_priv); | ||||
| 			ecobus = I915_READ_NOTRACE(ECOBUS); | ||||
| 			__gen6_gt_force_wake_mt_put(dev_priv); | ||||
| 			mutex_unlock(&dev->struct_mutex); | ||||
| 
 | ||||
| 			if (ecobus & FORCEWAKE_MT_ENABLE) { | ||||
| 				DRM_DEBUG_KMS("Using MT version of forcewake\n"); | ||||
| 				dev_priv->gt.force_wake_get = | ||||
| 					__gen6_gt_force_wake_mt_get; | ||||
| 				dev_priv->gt.force_wake_put = | ||||
| 					__gen6_gt_force_wake_mt_put; | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
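
intel_gt_init() moves forcewake out of dev_priv->display into a dedicated dev_priv->gt vtable, chosen once at driver load (VLV, multi-threaded, or plain gen6 variants). The indirection amounts to something like the following; the struct and field names are inferred from the diff, so treat this as a sketch:

	/* Sketch of the vtable that intel_gt_init() fills in. */
	struct intel_gt_funcs {
		void (*force_wake_get)(struct drm_i915_private *dev_priv);
		void (*force_wake_put)(struct drm_i915_private *dev_priv);
	};
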
|  | ||||
| @ -219,19 +219,28 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, | ||||
| 	int ret; | ||||
| 
 | ||||
| 	/* Force SNB workarounds for PIPE_CONTROL flushes */ | ||||
| 	intel_emit_post_sync_nonzero_flush(ring); | ||||
| 	ret = intel_emit_post_sync_nonzero_flush(ring); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 
 | ||||
| 	/* Just flush everything.  Experiments have shown that reducing the
 | ||||
| 	 * number of bits based on the write domains has little performance | ||||
| 	 * impact. | ||||
| 	 */ | ||||
| 	flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; | ||||
| 	flags |= PIPE_CONTROL_TLB_INVALIDATE; | ||||
| 	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; | ||||
| 	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; | ||||
| 	flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; | ||||
| 	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; | ||||
| 	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; | ||||
| 	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; | ||||
| 	/*
 | ||||
| 	 * Ensure that any following seqno writes only happen when the render | ||||
| 	 * cache is indeed flushed (but only if the caller actually wants that). | ||||
| 	 */ | ||||
| 	if (flush_domains) | ||||
| 		flags |= PIPE_CONTROL_CS_STALL; | ||||
| 
 | ||||
| 	ret = intel_ring_begin(ring, 6); | ||||
| 	if (ret) | ||||
| @ -433,11 +442,21 @@ static int init_render_ring(struct intel_ring_buffer *ring) | ||||
| 		 */ | ||||
| 		I915_WRITE(CACHE_MODE_0, | ||||
| 			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); | ||||
| 
 | ||||
| 		/* This is not explicitly set for GEN6, so read the register.
 | ||||
| 		 * see intel_ring_mi_set_context() for why we care. | ||||
| 		 * TODO: consider explicitly setting the bit for GEN5 | ||||
| 		 */ | ||||
| 		ring->itlb_before_ctx_switch = | ||||
| 			!!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS); | ||||
| 	} | ||||
| 
 | ||||
| 	if (INTEL_INFO(dev)->gen >= 6) | ||||
| 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); | ||||
| 
 | ||||
| 	if (IS_IVYBRIDGE(dev)) | ||||
| 		I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
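
The _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() writes used here target registers whose upper 16 bits are a per-bit write-enable mask for the lower 16, so a single write changes only the bits it unmasks. The helpers are defined in i915_reg.h essentially as:

	#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* unmask and set */
	#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* unmask and clear */
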
| @ -825,7 +844,11 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring) | ||||
| 
 | ||||
| 	spin_lock_irqsave(&dev_priv->irq_lock, flags); | ||||
| 	if (ring->irq_refcount++ == 0) { | ||||
| 		I915_WRITE_IMR(ring, ~ring->irq_enable_mask); | ||||
| 		if (IS_IVYBRIDGE(dev) && ring->id == RCS) | ||||
| 			I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | | ||||
| 						GEN6_RENDER_L3_PARITY_ERROR)); | ||||
| 		else | ||||
| 			I915_WRITE_IMR(ring, ~ring->irq_enable_mask); | ||||
| 		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; | ||||
| 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||||
| 		POSTING_READ(GTIMR); | ||||
| @ -844,7 +867,10 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) | ||||
| 
 | ||||
| 	spin_lock_irqsave(&dev_priv->irq_lock, flags); | ||||
| 	if (--ring->irq_refcount == 0) { | ||||
| 		I915_WRITE_IMR(ring, ~0); | ||||
| 		if (IS_IVYBRIDGE(dev) && ring->id == RCS) | ||||
| 			I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); | ||||
| 		else | ||||
| 			I915_WRITE_IMR(ring, ~0); | ||||
| 		dev_priv->gt_irq_mask |= ring->irq_enable_mask; | ||||
| 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||||
| 		POSTING_READ(GTIMR); | ||||
| @ -946,6 +972,7 @@ static int init_status_page(struct intel_ring_buffer *ring) | ||||
| 	ring->status_page.gfx_addr = obj->gtt_offset; | ||||
| 	ring->status_page.page_addr = kmap(obj->pages[0]); | ||||
| 	if (ring->status_page.page_addr == NULL) { | ||||
| 		ret = -ENOMEM; | ||||
| 		goto err_unpin; | ||||
| 	} | ||||
| 	ring->status_page.obj = obj; | ||||
| @ -969,6 +996,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, | ||||
| 				  struct intel_ring_buffer *ring) | ||||
| { | ||||
| 	struct drm_i915_gem_object *obj; | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	ring->dev = dev; | ||||
| @ -1002,8 +1030,9 @@ static int intel_init_ring_buffer(struct drm_device *dev, | ||||
| 	if (ret) | ||||
| 		goto err_unpin; | ||||
| 
 | ||||
| 	ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset, | ||||
| 					 ring->size); | ||||
| 	ring->virtual_start = | ||||
| 		ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset, | ||||
| 			   ring->size); | ||||
| 	if (ring->virtual_start == NULL) { | ||||
| 		DRM_ERROR("Failed to map ringbuffer.\n"); | ||||
| 		ret = -EINVAL; | ||||
| @ -1089,20 +1118,9 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) | ||||
| 
 | ||||
| static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = ring->dev->dev_private; | ||||
| 	bool was_interruptible; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	/* XXX As we have not yet audited all the paths to check that
 | ||||
| 	 * they are ready for ERESTARTSYS from intel_ring_begin, do not | ||||
| 	 * allow us to be interruptible by a signal. | ||||
| 	 */ | ||||
| 	was_interruptible = dev_priv->mm.interruptible; | ||||
| 	dev_priv->mm.interruptible = false; | ||||
| 
 | ||||
| 	ret = i915_wait_request(ring, seqno); | ||||
| 
 | ||||
| 	dev_priv->mm.interruptible = was_interruptible; | ||||
| 	ret = i915_wait_seqno(ring, seqno); | ||||
| 	if (!ret) | ||||
| 		i915_gem_retire_requests_ring(ring); | ||||
| 
 | ||||
| @ -1200,8 +1218,10 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) | ||||
| 		} | ||||
| 
 | ||||
| 		msleep(1); | ||||
| 		if (atomic_read(&dev_priv->mm.wedged)) | ||||
| 			return -EAGAIN; | ||||
| 
 | ||||
| 		ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); | ||||
| 		if (ret) | ||||
| 			return ret; | ||||
| 	} while (!time_after(jiffies, end)); | ||||
| 	trace_i915_ring_wait_end(ring); | ||||
| 	return -EBUSY; | ||||
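
The wait loop now reports hangs through i915_gem_check_wedge() instead of reading mm.wedged directly, so a reset in progress can surface as -EAGAIN for interruptible callers rather than a blanket -EIO or -EAGAIN. A simplified sketch of what such a helper does, not the full implementation:

	/* Simplified: translate wedged state into -EAGAIN or -EIO. */
	static int example_check_wedge(struct drm_i915_private *dev_priv,
				       bool interruptible)
	{
		if (!atomic_read(&dev_priv->mm.wedged))
			return 0;

		/* Non-interruptible callers cannot retry, so they get -EIO. */
		return interruptible ? -EAGAIN : -EIO;
	}
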
| @ -1210,12 +1230,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) | ||||
| int intel_ring_begin(struct intel_ring_buffer *ring, | ||||
| 		     int num_dwords) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = ring->dev->dev_private; | ||||
| 	drm_i915_private_t *dev_priv = ring->dev->dev_private; | ||||
| 	int n = 4*num_dwords; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	if (unlikely(atomic_read(&dev_priv->mm.wedged))) | ||||
| 		return -EIO; | ||||
| 	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 
 | ||||
| 	if (unlikely(ring->tail + n > ring->effective_size)) { | ||||
| 		ret = intel_wrap_ring_buffer(ring); | ||||
| @ -1250,20 +1271,31 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, | ||||
| 	drm_i915_private_t *dev_priv = ring->dev->dev_private; | ||||
| 
 | ||||
|        /* Every tail move must follow the sequence below */ | ||||
| 	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, | ||||
| 		GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | | ||||
| 		GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE); | ||||
| 	I915_WRITE(GEN6_BSD_RNCID, 0x0); | ||||
| 
 | ||||
| 	/* Disable notification that the ring is IDLE. The GT
 | ||||
| 	 * will then assume that it is busy and bring it out of rc6. | ||||
| 	 */ | ||||
| 	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, | ||||
| 		   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); | ||||
| 
 | ||||
| 	/* Clear the context id. Here be magic! */ | ||||
| 	I915_WRITE64(GEN6_BSD_RNCID, 0x0); | ||||
| 
 | ||||
| 	/* Wait for the ring not to be idle, i.e. for it to wake up. */ | ||||
| 	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & | ||||
| 		GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0, | ||||
| 		50)) | ||||
| 	DRM_ERROR("timed out waiting for IDLE Indicator\n"); | ||||
| 		      GEN6_BSD_SLEEP_INDICATOR) == 0, | ||||
| 		     50)) | ||||
| 		DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); | ||||
| 
 | ||||
| 	/* Now that the ring is fully powered up, update the tail */ | ||||
| 	I915_WRITE_TAIL(ring, value); | ||||
| 	POSTING_READ(RING_TAIL(ring->mmio_base)); | ||||
| 
 | ||||
| 	/* Let the ring send IDLE messages to the GT again,
 | ||||
| 	 * and so let it sleep to conserve power when idle. | ||||
| 	 */ | ||||
| 	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, | ||||
| 		GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | | ||||
| 		GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); | ||||
| 		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); | ||||
| } | ||||
| 
 | ||||
| static int gen6_ring_flush(struct intel_ring_buffer *ring, | ||||
|  | ||||
| @ -113,9 +113,17 @@ struct  intel_ring_buffer { | ||||
| 	 * Do we have some not yet emitted requests outstanding? | ||||
| 	 */ | ||||
| 	u32 outstanding_lazy_request; | ||||
| 	bool gpu_caches_dirty; | ||||
| 
 | ||||
| 	wait_queue_head_t irq_queue; | ||||
| 
 | ||||
| 	/**
 | ||||
| 	 * Do an explicit TLB flush before MI_SET_CONTEXT | ||||
| 	 */ | ||||
| 	bool itlb_before_ctx_switch; | ||||
| 	struct i915_hw_context *default_context; | ||||
| 	struct drm_i915_gem_object *last_context_obj; | ||||
| 
 | ||||
| 	void *private; | ||||
| }; | ||||
| 
 | ||||
|  | ||||
| @ -140,9 +140,6 @@ struct intel_sdvo { | ||||
| 
 | ||||
| 	/* DDC bus used by this SDVO encoder */ | ||||
| 	uint8_t ddc_bus; | ||||
| 
 | ||||
| 	/* Input timings for adjusted_mode */ | ||||
| 	struct intel_sdvo_dtd input_dtd; | ||||
| }; | ||||
| 
 | ||||
| struct intel_sdvo_connector { | ||||
| @ -938,7 +935,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) | ||||
| 
 | ||||
| static bool | ||||
| intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo, | ||||
| 					struct drm_display_mode *mode) | ||||
| 					const struct drm_display_mode *mode) | ||||
| { | ||||
| 	struct intel_sdvo_dtd output_dtd; | ||||
| 
 | ||||
| @ -953,11 +950,15 @@ intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo, | ||||
| 	return true; | ||||
| } | ||||
| 
 | ||||
| /* Asks the sdvo controller for the preferred input mode given the output mode.
 | ||||
|  * Unfortunately we have to set up the full output mode to do that. */ | ||||
| static bool | ||||
| intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo, | ||||
| 					struct drm_display_mode *mode, | ||||
| 					struct drm_display_mode *adjusted_mode) | ||||
| intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo, | ||||
| 				    const struct drm_display_mode *mode, | ||||
| 				    struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	struct intel_sdvo_dtd input_dtd; | ||||
| 
 | ||||
| 	/* Reset the input timing to the screen. Assume always input 0. */ | ||||
| 	if (!intel_sdvo_set_target_input(intel_sdvo)) | ||||
| 		return false; | ||||
| @ -969,16 +970,16 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo, | ||||
| 		return false; | ||||
| 
 | ||||
| 	if (!intel_sdvo_get_preferred_input_timing(intel_sdvo, | ||||
| 						   &intel_sdvo->input_dtd)) | ||||
| 						   &input_dtd)) | ||||
| 		return false; | ||||
| 
 | ||||
| 	intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd); | ||||
| 	intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); | ||||
| 
 | ||||
| 	return true; | ||||
| } | ||||
| 
 | ||||
| static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | ||||
| 				  struct drm_display_mode *mode, | ||||
| 				  const struct drm_display_mode *mode, | ||||
| 				  struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); | ||||
| @ -993,17 +994,17 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | ||||
| 		if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) | ||||
| 			return false; | ||||
| 
 | ||||
| 		(void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, | ||||
| 							     mode, | ||||
| 							     adjusted_mode); | ||||
| 		(void) intel_sdvo_get_preferred_input_mode(intel_sdvo, | ||||
| 							   mode, | ||||
| 							   adjusted_mode); | ||||
| 	} else if (intel_sdvo->is_lvds) { | ||||
| 		if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, | ||||
| 							     intel_sdvo->sdvo_lvds_fixed_mode)) | ||||
| 			return false; | ||||
| 
 | ||||
| 		(void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, | ||||
| 							     mode, | ||||
| 							     adjusted_mode); | ||||
| 		(void) intel_sdvo_get_preferred_input_mode(intel_sdvo, | ||||
| 							   mode, | ||||
| 							   adjusted_mode); | ||||
| 	} | ||||
| 
 | ||||
| 	/* Make the CRTC code factor in the SDVO pixel multiplier.  The
 | ||||
| @ -1057,7 +1058,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | ||||
| 					     intel_sdvo->sdvo_lvds_fixed_mode); | ||||
| 	else | ||||
| 		intel_sdvo_get_dtd_from_mode(&output_dtd, mode); | ||||
| 	(void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd); | ||||
| 	if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd)) | ||||
| 		DRM_INFO("Setting output timings on %s failed\n", | ||||
| 			 SDVO_NAME(intel_sdvo)); | ||||
| 
 | ||||
| 	/* Set the input timing to the screen. Assume always input 0. */ | ||||
| 	if (!intel_sdvo_set_target_input(intel_sdvo)) | ||||
| @ -1079,7 +1082,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | ||||
| 	 * adjusted_mode. | ||||
| 	 */ | ||||
| 	intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); | ||||
| 	(void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); | ||||
| 	if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) | ||||
| 		DRM_INFO("Setting input timings on %s failed\n", | ||||
| 			 SDVO_NAME(intel_sdvo)); | ||||
| 
 | ||||
| 	switch (pixel_multiplier) { | ||||
| 	default: | ||||
| @ -1376,7 +1381,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) | ||||
| 
 | ||||
| 	/* add 30ms delay when the output type might be TV */ | ||||
| 	if (intel_sdvo->caps.output_flags & SDVO_TV_MASK) | ||||
| 		mdelay(30); | ||||
| 		msleep(30); | ||||
| 
 | ||||
| 	if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) | ||||
| 		return connector_status_unknown; | ||||
| @ -2521,6 +2526,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) | ||||
| 	struct drm_i915_private *dev_priv = dev->dev_private; | ||||
| 	struct intel_encoder *intel_encoder; | ||||
| 	struct intel_sdvo *intel_sdvo; | ||||
| 	u32 hotplug_mask; | ||||
| 	int i; | ||||
| 
 | ||||
| 	intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); | ||||
| @ -2552,10 +2558,18 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if (intel_sdvo->is_sdvob) | ||||
| 		dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; | ||||
| 	else | ||||
| 		dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; | ||||
| 	hotplug_mask = 0; | ||||
| 	if (IS_G4X(dev)) { | ||||
| 		hotplug_mask = intel_sdvo->is_sdvob ? | ||||
| 			SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X; | ||||
| 	} else if (IS_GEN4(dev)) { | ||||
| 		hotplug_mask = intel_sdvo->is_sdvob ? | ||||
| 			SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965; | ||||
| 	} else { | ||||
| 		hotplug_mask = intel_sdvo->is_sdvob ? | ||||
| 			SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915; | ||||
| 	} | ||||
| 	dev_priv->hotplug_supported_mask |= hotplug_mask; | ||||
| 
 | ||||
| 	drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs); | ||||
| 
 | ||||
|  | ||||
| @ -56,6 +56,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, | ||||
| 	sprctl &= ~SPRITE_PIXFORMAT_MASK; | ||||
| 	sprctl &= ~SPRITE_RGB_ORDER_RGBX; | ||||
| 	sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK; | ||||
| 	sprctl &= ~SPRITE_TILED; | ||||
| 
 | ||||
| 	switch (fb->pixel_format) { | ||||
| 	case DRM_FORMAT_XBGR8888: | ||||
| @ -84,7 +85,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, | ||||
| 		break; | ||||
| 	default: | ||||
| 		DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n"); | ||||
| 		sprctl |= DVS_FORMAT_RGBX888; | ||||
| 		sprctl |= SPRITE_FORMAT_RGBX888; | ||||
| 		pixel_size = 4; | ||||
| 		break; | ||||
| 	} | ||||
| @ -233,6 +234,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, | ||||
| 	dvscntr &= ~DVS_PIXFORMAT_MASK; | ||||
| 	dvscntr &= ~DVS_RGB_ORDER_XBGR; | ||||
| 	dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK; | ||||
| 	dvscntr &= ~DVS_TILED; | ||||
| 
 | ||||
| 	switch (fb->pixel_format) { | ||||
| 	case DRM_FORMAT_XBGR8888: | ||||
| @ -326,6 +328,12 @@ intel_enable_primary(struct drm_crtc *crtc) | ||||
| 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||||
| 	int reg = DSPCNTR(intel_crtc->plane); | ||||
| 
 | ||||
| 	if (!intel_crtc->primary_disabled) | ||||
| 		return; | ||||
| 
 | ||||
| 	intel_crtc->primary_disabled = false; | ||||
| 	intel_update_fbc(dev); | ||||
| 
 | ||||
| 	I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE); | ||||
| } | ||||
| 
 | ||||
| @ -337,7 +345,13 @@ intel_disable_primary(struct drm_crtc *crtc) | ||||
| 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||||
| 	int reg = DSPCNTR(intel_crtc->plane); | ||||
| 
 | ||||
| 	if (intel_crtc->primary_disabled) | ||||
| 		return; | ||||
| 
 | ||||
| 	I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE); | ||||
| 
 | ||||
| 	intel_crtc->primary_disabled = true; | ||||
| 	intel_update_fbc(dev); | ||||
| } | ||||
| 
 | ||||
| static int | ||||
| @ -485,18 +499,14 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | ||||
| 	 * Be sure to re-enable the primary before the sprite is no longer | ||||
| 	 * covering it fully. | ||||
| 	 */ | ||||
| 	if (!disable_primary && intel_plane->primary_disabled) { | ||||
| 	if (!disable_primary) | ||||
| 		intel_enable_primary(crtc); | ||||
| 		intel_plane->primary_disabled = false; | ||||
| 	} | ||||
| 
 | ||||
| 	intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y, | ||||
| 				  crtc_w, crtc_h, x, y, src_w, src_h); | ||||
| 
 | ||||
| 	if (disable_primary) { | ||||
| 	if (disable_primary) | ||||
| 		intel_disable_primary(crtc); | ||||
| 		intel_plane->primary_disabled = true; | ||||
| 	} | ||||
| 
 | ||||
| 	/* Unpin old obj after new one is active to avoid ugliness */ | ||||
| 	if (old_obj) { | ||||
| @ -527,11 +537,8 @@ intel_disable_plane(struct drm_plane *plane) | ||||
| 	struct intel_plane *intel_plane = to_intel_plane(plane); | ||||
| 	int ret = 0; | ||||
| 
 | ||||
| 	if (intel_plane->primary_disabled) { | ||||
| 	if (plane->crtc) | ||||
| 		intel_enable_primary(plane->crtc); | ||||
| 		intel_plane->primary_disabled = false; | ||||
| 	} | ||||
| 
 | ||||
| 	intel_plane->disable_plane(plane); | ||||
| 
 | ||||
| 	if (!intel_plane->obj) | ||||
| @ -685,6 +692,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe) | ||||
| 		break; | ||||
| 
 | ||||
| 	default: | ||||
| 		kfree(intel_plane); | ||||
| 		return -ENODEV; | ||||
| 	} | ||||
| 
 | ||||
| @ -699,4 +707,3 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe) | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -891,24 +891,21 @@ intel_tv_mode_valid(struct drm_connector *connector, | ||||
| 
 | ||||
| 
 | ||||
| static bool | ||||
| intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | ||||
| intel_tv_mode_fixup(struct drm_encoder *encoder, | ||||
| 		    const struct drm_display_mode *mode, | ||||
| 		    struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	struct drm_device *dev = encoder->dev; | ||||
| 	struct drm_mode_config *drm_config = &dev->mode_config; | ||||
| 	struct intel_tv *intel_tv = enc_to_intel_tv(encoder); | ||||
| 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); | ||||
| 	struct drm_encoder *other_encoder; | ||||
| 	struct intel_encoder *other_encoder; | ||||
| 
 | ||||
| 	if (!tv_mode) | ||||
| 		return false; | ||||
| 
 | ||||
| 	/* FIXME: lock encoder list */ | ||||
| 	list_for_each_entry(other_encoder, &drm_config->encoder_list, head) { | ||||
| 		if (other_encoder != encoder && | ||||
| 		    other_encoder->crtc == encoder->crtc) | ||||
| 	for_each_encoder_on_crtc(dev, encoder->crtc, other_encoder) | ||||
| 		if (&other_encoder->base != encoder) | ||||
| 			return false; | ||||
| 	} | ||||
| 
 | ||||
| 	adjusted_mode->clock = tv_mode->clock; | ||||
| 	return true; | ||||
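
Both this TV fixup and the LVDS fixup earlier in the series switch from open-coded encoder-list walks to for_each_encoder_on_crtc(). The macro, from i915_drv.h as introduced alongside these changes, is essentially:

	/* Iterate the intel_encoders currently attached to @__crtc. */
	#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
		list_for_each_entry((intel_encoder), \
				    &(dev)->mode_config.encoder_list, \
				    base.head) \
			if ((intel_encoder)->base.crtc == (__crtc))
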
|  | ||||
| @ -75,7 +75,6 @@ static struct drm_driver driver = { | ||||
| 	.irq_postinstall = mga_driver_irq_postinstall, | ||||
| 	.irq_uninstall = mga_driver_irq_uninstall, | ||||
| 	.irq_handler = mga_driver_irq_handler, | ||||
| 	.reclaim_buffers = drm_core_reclaim_buffers, | ||||
| 	.ioctls = mga_ioctls, | ||||
| 	.dma_ioctl = mga_dma_buffers, | ||||
| 	.fops = &mga_driver_fops, | ||||
|  | ||||
| @ -47,6 +47,9 @@ static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev) | ||||
| 	bool primary = false; | ||||
| 
 | ||||
| 	ap = alloc_apertures(1); | ||||
| 	if (!ap) | ||||
| 		return; | ||||
| 
 | ||||
| 	ap->ranges[0].base = pci_resource_start(pdev, 0); | ||||
| 	ap->ranges[0].size = pci_resource_len(pdev, 0); | ||||
| 
 | ||||
|  | ||||
| @ -78,8 +78,8 @@ static inline void mga_wait_busy(struct mga_device *mdev) | ||||
|  * to just pass that straight through, so this does nothing | ||||
|  */ | ||||
| static bool mga_crtc_mode_fixup(struct drm_crtc *crtc, | ||||
| 				   struct drm_display_mode *mode, | ||||
| 				   struct drm_display_mode *adjusted_mode) | ||||
| 				const struct drm_display_mode *mode, | ||||
| 				struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	return true; | ||||
| } | ||||
| @ -1322,8 +1322,8 @@ void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | ||||
|  * to handle any encoder-specific limitations | ||||
|  */ | ||||
| static bool mga_encoder_mode_fixup(struct drm_encoder *encoder, | ||||
| 				  struct drm_display_mode *mode, | ||||
| 				  struct drm_display_mode *adjusted_mode) | ||||
| 				   const struct drm_display_mode *mode, | ||||
| 				   struct drm_display_mode *adjusted_mode) | ||||
| { | ||||
| 	return true; | ||||
| } | ||||
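Editor's note: both mga fixup hunks only constify the signature to follow the drm core change; the bodies remain pass-throughs. Under the new contract, a non-trivial callback reads the const requested mode and writes only to adjusted_mode, as in this hypothetical sketch (MGA_MAX_CLOCK is invented for illustration):

	static bool example_mode_fixup(struct drm_encoder *encoder,
				       const struct drm_display_mode *mode,
				       struct drm_display_mode *adjusted_mode)
	{
		if (mode->clock > MGA_MAX_CLOCK)	/* invented limit */
			adjusted_mode->clock = MGA_MAX_CLOCK;
		return true;
	}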
|  | ||||
| @ -4,7 +4,7 @@ | ||||
| 
 | ||||
| ccflags-y := -Iinclude/drm | ||||
| nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
 | ||||
|              nouveau_object.o nouveau_irq.o nouveau_notifier.o \
 | ||||
|              nouveau_gpuobj.o nouveau_irq.o nouveau_notifier.o \
 | ||||
|              nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
 | ||||
|              nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
 | ||||
|              nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
 | ||||
| @ -12,6 +12,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ | ||||
|              nouveau_hdmi.o nouveau_dp.o nouveau_ramht.o \
 | ||||
| 	     nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
 | ||||
| 	     nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \
 | ||||
| 	     nouveau_abi16.o \
 | ||||
|              nv04_timer.o \
 | ||||
|              nv04_mc.o nv40_mc.o nv50_mc.o \
 | ||||
|              nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \
 | ||||
|  | ||||
							
								
								
									
drivers/gpu/drm/nouveau/nouveau_abi16.c	(new file, 245 lines)
							| @ -0,0 +1,245 @@ | ||||
| /*
 | ||||
|  * Copyright 2012 Red Hat Inc. | ||||
|  * | ||||
|  * Permission is hereby granted, free of charge, to any person obtaining a | ||||
|  * copy of this software and associated documentation files (the "Software"), | ||||
|  * to deal in the Software without restriction, including without limitation | ||||
|  * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||||
|  * and/or sell copies of the Software, and to permit persons to whom the | ||||
|  * Software is furnished to do so, subject to the following conditions: | ||||
|  * | ||||
|  * The above copyright notice and this permission notice shall be included in | ||||
|  * all copies or substantial portions of the Software. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
|  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
|  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | ||||
|  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||||
|  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||||
|  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||||
|  * OTHER DEALINGS IN THE SOFTWARE. | ||||
|  * | ||||
|  */ | ||||
| 
 | ||||
| #include "drmP.h" | ||||
| 
 | ||||
| #include "nouveau_drv.h" | ||||
| #include "nouveau_dma.h" | ||||
| #include "nouveau_abi16.h" | ||||
| #include "nouveau_ramht.h" | ||||
| #include "nouveau_software.h" | ||||
| 
 | ||||
| int | ||||
| nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) | ||||
| { | ||||
| 	struct drm_nouveau_private *dev_priv = dev->dev_private; | ||||
| 	struct drm_nouveau_getparam *getparam = data; | ||||
| 
 | ||||
| 	switch (getparam->param) { | ||||
| 	case NOUVEAU_GETPARAM_CHIPSET_ID: | ||||
| 		getparam->value = dev_priv->chipset; | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_PCI_VENDOR: | ||||
| 		getparam->value = dev->pci_vendor; | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_PCI_DEVICE: | ||||
| 		getparam->value = dev->pci_device; | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_BUS_TYPE: | ||||
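| 		/* 0: AGP, 1: PCI, 2: PCIe */ | ||||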
| 		if (drm_pci_device_is_agp(dev)) | ||||
| 			getparam->value = 0; | ||||
| 		else | ||||
| 		if (!pci_is_pcie(dev->pdev)) | ||||
| 			getparam->value = 1; | ||||
| 		else | ||||
| 			getparam->value = 2; | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_FB_SIZE: | ||||
| 		getparam->value = dev_priv->fb_available_size; | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_AGP_SIZE: | ||||
| 		getparam->value = dev_priv->gart_info.aper_size; | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_VM_VRAM_BASE: | ||||
| 		getparam->value = 0; /* deprecated */ | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_PTIMER_TIME: | ||||
| 		getparam->value = dev_priv->engine.timer.read(dev); | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_HAS_BO_USAGE: | ||||
| 		getparam->value = 1; | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_HAS_PAGEFLIP: | ||||
| 		getparam->value = 1; | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_GRAPH_UNITS: | ||||
| 		/* NV40 and NV50 versions are quite different, but register
 | ||||
| 		 * address is the same. User is supposed to know the card | ||||
| 		 * family anyway... */ | ||||
| 		if (dev_priv->chipset >= 0x40) { | ||||
| 			getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS); | ||||
| 			break; | ||||
| 		} | ||||
| 		/* FALLTHRU */ | ||||
| 	default: | ||||
| 		NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
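Editor's note (not part of the patch): the GETPARAM path above is reachable from userspace with a plain ioctl. The sketch below hardcodes the command number from the DRM_IOCTL_NOUVEAU_GETPARAM definition in nouveau_abi16.h further down (type 'd', DRM_COMMAND_BASE 0x40 plus command 0x00); the device path is an assumption:

	#include <stdint.h>
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>

	struct drm_nouveau_getparam {
		uint64_t param;
		uint64_t value;
	};

	#define NOUVEAU_GETPARAM_CHIPSET_ID	11
	#define DRM_IOCTL_NOUVEAU_GETPARAM \
		_IOWR('d', 0x40, struct drm_nouveau_getparam)

	int main(void)
	{
		struct drm_nouveau_getparam gp = { .param = NOUVEAU_GETPARAM_CHIPSET_ID };
		int fd = open("/dev/dri/card0", O_RDWR);	/* assumed node */

		if (fd < 0 || ioctl(fd, DRM_IOCTL_NOUVEAU_GETPARAM, &gp) < 0) {
			perror("NOUVEAU_GETPARAM");
			return 1;
		}
		printf("chipset: 0x%llx\n", (unsigned long long)gp.value);
		close(fd);
		return 0;
	}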
| 
 | ||||
| int | ||||
| nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS) | ||||
| { | ||||
| 	return -EINVAL; | ||||
| } | ||||
| 
 | ||||
| int | ||||
| nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) | ||||
| { | ||||
| 	struct drm_nouveau_private *dev_priv = dev->dev_private; | ||||
| 	struct drm_nouveau_channel_alloc *init = data; | ||||
| 	struct nouveau_channel *chan; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	if (!dev_priv->eng[NVOBJ_ENGINE_GR]) | ||||
| 		return -ENODEV; | ||||
| 
 | ||||
| 	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	ret = nouveau_channel_alloc(dev, &chan, file_priv, | ||||
| 				    init->fb_ctxdma_handle, | ||||
| 				    init->tt_ctxdma_handle); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 	init->channel  = chan->id; | ||||
| 
 | ||||
| 	if (nouveau_vram_pushbuf == 0) { | ||||
| 		if (chan->dma.ib_max) | ||||
| 			init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | | ||||
| 						NOUVEAU_GEM_DOMAIN_GART; | ||||
| 		else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM) | ||||
| 			init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; | ||||
| 		else | ||||
| 			init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART; | ||||
| 	} else { | ||||
| 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; | ||||
| 	} | ||||
| 
 | ||||
| 	if (dev_priv->card_type < NV_C0) { | ||||
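| 		/* pre-NVC0 only: subchannel 1 carries the kernel's NvSw software object */ | ||||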
| 		init->subchan[0].handle = 0x00000000; | ||||
| 		init->subchan[0].grclass = 0x0000; | ||||
| 		init->subchan[1].handle = NvSw; | ||||
| 		init->subchan[1].grclass = NV_SW; | ||||
| 		init->nr_subchan = 2; | ||||
| 	} | ||||
| 
 | ||||
| 	/* Named memory object area */ | ||||
| 	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, | ||||
| 				    &init->notifier_handle); | ||||
| 
 | ||||
| 	if (ret == 0) | ||||
| 		atomic_inc(&chan->users); /* userspace reference */ | ||||
| 	nouveau_channel_put(&chan); | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| int | ||||
| nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS) | ||||
| { | ||||
| 	struct drm_nouveau_channel_free *req = data; | ||||
| 	struct nouveau_channel *chan; | ||||
| 
 | ||||
| 	chan = nouveau_channel_get(file_priv, req->channel); | ||||
| 	if (IS_ERR(chan)) | ||||
| 		return PTR_ERR(chan); | ||||
| 
 | ||||
| 	list_del(&chan->list); | ||||
| 	atomic_dec(&chan->users); | ||||
| 	nouveau_channel_put(&chan); | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| int | ||||
| nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) | ||||
| { | ||||
| 	struct drm_nouveau_grobj_alloc *init = data; | ||||
| 	struct nouveau_channel *chan; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	if (init->handle == ~0) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	/* compatibility with userspace that assumes 506e for all chipsets */ | ||||
| 	if (init->class == 0x506e) { | ||||
| 		init->class = nouveau_software_class(dev); | ||||
| 		if (init->class == 0x906e) | ||||
| 			return 0; | ||||
| 	} else | ||||
| 	if (init->class == 0x906e) { | ||||
| 		NV_ERROR(dev, "906e not supported yet\n"); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 
 | ||||
| 	chan = nouveau_channel_get(file_priv, init->channel); | ||||
| 	if (IS_ERR(chan)) | ||||
| 		return PTR_ERR(chan); | ||||
| 
 | ||||
| 	if (nouveau_ramht_find(chan, init->handle)) { | ||||
| 		ret = -EEXIST; | ||||
| 		goto out; | ||||
| 	} | ||||
| 
 | ||||
| 	ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class); | ||||
| 	if (ret) { | ||||
| 		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", | ||||
| 			 ret, init->channel, init->handle); | ||||
| 	} | ||||
| 
 | ||||
| out: | ||||
| 	nouveau_channel_put(&chan); | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| int | ||||
| nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) | ||||
| { | ||||
| 	struct drm_nouveau_private *dev_priv = dev->dev_private; | ||||
| 	struct drm_nouveau_notifierobj_alloc *na = data; | ||||
| 	struct nouveau_channel *chan; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	/* completely unnecessary for these chipsets... */ | ||||
| 	if (unlikely(dev_priv->card_type >= NV_C0)) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	chan = nouveau_channel_get(file_priv, na->channel); | ||||
| 	if (IS_ERR(chan)) | ||||
| 		return PTR_ERR(chan); | ||||
| 
 | ||||
| 	ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000, | ||||
| 				     &na->offset); | ||||
| 	nouveau_channel_put(&chan); | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| int | ||||
| nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) | ||||
| { | ||||
| 	struct drm_nouveau_gpuobj_free *objfree = data; | ||||
| 	struct nouveau_channel *chan; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	chan = nouveau_channel_get(file_priv, objfree->channel); | ||||
| 	if (IS_ERR(chan)) | ||||
| 		return PTR_ERR(chan); | ||||
| 
 | ||||
| 	/* Synchronize with the user channel */ | ||||
| 	nouveau_channel_idle(chan); | ||||
| 
 | ||||
| 	ret = nouveau_ramht_remove(chan, objfree->handle); | ||||
| 	nouveau_channel_put(&chan); | ||||
| 	return ret; | ||||
| } | ||||
							
								
								
									
drivers/gpu/drm/nouveau/nouveau_abi16.h	(new file, 83 lines)
							| @ -0,0 +1,83 @@ | ||||
| #ifndef __NOUVEAU_ABI16_H__ | ||||
| #define __NOUVEAU_ABI16_H__ | ||||
| 
 | ||||
| #define ABI16_IOCTL_ARGS                                                       \ | ||||
| 	struct drm_device *dev, void *data, struct drm_file *file_priv | ||||
| int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS); | ||||
| int nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS); | ||||
| int nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS); | ||||
| int nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS); | ||||
| int nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS); | ||||
| int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS); | ||||
| int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS); | ||||
| 
 | ||||
| struct drm_nouveau_channel_alloc { | ||||
| 	uint32_t     fb_ctxdma_handle; | ||||
| 	uint32_t     tt_ctxdma_handle; | ||||
| 
 | ||||
| 	int          channel; | ||||
| 	uint32_t     pushbuf_domains; | ||||
| 
 | ||||
| 	/* Notifier memory */ | ||||
| 	uint32_t     notifier_handle; | ||||
| 
 | ||||
| 	/* DRM-enforced subchannel assignments */ | ||||
| 	struct { | ||||
| 		uint32_t handle; | ||||
| 		uint32_t grclass; | ||||
| 	} subchan[8]; | ||||
| 	uint32_t nr_subchan; | ||||
| }; | ||||
| 
 | ||||
| struct drm_nouveau_channel_free { | ||||
| 	int channel; | ||||
| }; | ||||
| 
 | ||||
| struct drm_nouveau_grobj_alloc { | ||||
| 	int      channel; | ||||
| 	uint32_t handle; | ||||
| 	int      class; | ||||
| }; | ||||
| 
 | ||||
| struct drm_nouveau_notifierobj_alloc { | ||||
| 	uint32_t channel; | ||||
| 	uint32_t handle; | ||||
| 	uint32_t size; | ||||
| 	uint32_t offset; | ||||
| }; | ||||
| 
 | ||||
| struct drm_nouveau_gpuobj_free { | ||||
| 	int      channel; | ||||
| 	uint32_t handle; | ||||
| }; | ||||
| 
 | ||||
| #define NOUVEAU_GETPARAM_PCI_VENDOR      3 | ||||
| #define NOUVEAU_GETPARAM_PCI_DEVICE      4 | ||||
| #define NOUVEAU_GETPARAM_BUS_TYPE        5 | ||||
| #define NOUVEAU_GETPARAM_FB_SIZE         8 | ||||
| #define NOUVEAU_GETPARAM_AGP_SIZE        9 | ||||
| #define NOUVEAU_GETPARAM_CHIPSET_ID      11 | ||||
| #define NOUVEAU_GETPARAM_VM_VRAM_BASE    12 | ||||
| #define NOUVEAU_GETPARAM_GRAPH_UNITS     13 | ||||
| #define NOUVEAU_GETPARAM_PTIMER_TIME     14 | ||||
| #define NOUVEAU_GETPARAM_HAS_BO_USAGE    15 | ||||
| #define NOUVEAU_GETPARAM_HAS_PAGEFLIP    16 | ||||
| struct drm_nouveau_getparam { | ||||
| 	uint64_t param; | ||||
| 	uint64_t value; | ||||
| }; | ||||
| 
 | ||||
| struct drm_nouveau_setparam { | ||||
| 	uint64_t param; | ||||
| 	uint64_t value; | ||||
| }; | ||||
| 
 | ||||
| #define DRM_IOCTL_NOUVEAU_GETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam) | ||||
| #define DRM_IOCTL_NOUVEAU_SETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam) | ||||
| #define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC      DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc) | ||||
| #define DRM_IOCTL_NOUVEAU_CHANNEL_FREE       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free) | ||||
| #define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC        DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc) | ||||
| #define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC  DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc) | ||||
| #define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE        DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free) | ||||
| 
 | ||||
| #endif | ||||
| @ -6091,6 +6091,18 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	/* fdo#50830: connector indices for VGA and DVI-I are backwards */ | ||||
| 	if (nv_match_device(dev, 0x0421, 0x3842, 0xc793)) { | ||||
| 		if (idx == 0 && *conn == 0x02000300) | ||||
| 			*conn = 0x02011300; | ||||
| 		else | ||||
| 		if (idx == 1 && *conn == 0x04011310) | ||||
| 			*conn = 0x04000310; | ||||
| 		else | ||||
| 		if (idx == 2 && *conn == 0x02011312) | ||||
| 			*conn = 0x02000312; | ||||
| 	} | ||||
| 
 | ||||
| 	return true; | ||||
| } | ||||
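Editor's note: the quirk keys off a single board (PCI device 0x0421 with subsystem IDs 0x3842:0xc793) via nv_match_device(). In essence that helper is a three-way PCI ID compare; the sketch below is inferred from the call site, not the verbatim definition:

	static bool
	nv_match_device(struct drm_device *dev, unsigned device,
			unsigned sub_vendor, unsigned sub_device)
	{
		return dev->pdev->device == device &&
		       dev->pdev->subsystem_vendor == sub_vendor &&
		       dev->pdev->subsystem_device == sub_device;
	}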
| 
 | ||||
|  | ||||
| @ -395,98 +395,3 @@ nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) | ||||
| 		nouveau_channel_put(&chan); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| /***********************************
 | ||||
|  * ioctls wrapping the functions | ||||
|  ***********************************/ | ||||
| 
 | ||||
| static int | ||||
| nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, | ||||
| 			 struct drm_file *file_priv) | ||||
| { | ||||
| 	struct drm_nouveau_private *dev_priv = dev->dev_private; | ||||
| 	struct drm_nouveau_channel_alloc *init = data; | ||||
| 	struct nouveau_channel *chan; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	if (!dev_priv->eng[NVOBJ_ENGINE_GR]) | ||||
| 		return -ENODEV; | ||||
| 
 | ||||
| 	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	ret = nouveau_channel_alloc(dev, &chan, file_priv, | ||||
| 				    init->fb_ctxdma_handle, | ||||
| 				    init->tt_ctxdma_handle); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 	init->channel  = chan->id; | ||||
| 
 | ||||
| 	if (nouveau_vram_pushbuf == 0) { | ||||
| 		if (chan->dma.ib_max) | ||||
| 			init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | | ||||
| 						NOUVEAU_GEM_DOMAIN_GART; | ||||
| 		else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM) | ||||
| 			init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; | ||||
| 		else | ||||
| 			init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART; | ||||
| 	} else { | ||||
| 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; | ||||
| 	} | ||||
| 
 | ||||
| 	if (dev_priv->card_type < NV_C0) { | ||||
| 		init->subchan[0].handle = 0x00000000; | ||||
| 		init->subchan[0].grclass = 0x0000; | ||||
| 		init->subchan[1].handle = NvSw; | ||||
| 		init->subchan[1].grclass = NV_SW; | ||||
| 		init->nr_subchan = 2; | ||||
| 	} | ||||
| 
 | ||||
| 	/* Named memory object area */ | ||||
| 	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, | ||||
| 				    &init->notifier_handle); | ||||
| 
 | ||||
| 	if (ret == 0) | ||||
| 		atomic_inc(&chan->users); /* userspace reference */ | ||||
| 	nouveau_channel_put(&chan); | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| static int | ||||
| nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, | ||||
| 			struct drm_file *file_priv) | ||||
| { | ||||
| 	struct drm_nouveau_channel_free *req = data; | ||||
| 	struct nouveau_channel *chan; | ||||
| 
 | ||||
| 	chan = nouveau_channel_get(file_priv, req->channel); | ||||
| 	if (IS_ERR(chan)) | ||||
| 		return PTR_ERR(chan); | ||||
| 
 | ||||
| 	list_del(&chan->list); | ||||
| 	atomic_dec(&chan->users); | ||||
| 	nouveau_channel_put(&chan); | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| /***********************************
 | ||||
|  * finally, the ioctl table | ||||
|  ***********************************/ | ||||
| 
 | ||||
| struct drm_ioctl_desc nouveau_ioctls[] = { | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH), | ||||
| }; | ||||
| 
 | ||||
| int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); | ||||
|  | ||||
| @ -29,6 +29,7 @@ | ||||
| #include "drm.h" | ||||
| #include "drm_crtc_helper.h" | ||||
| #include "nouveau_drv.h" | ||||
| #include "nouveau_abi16.h" | ||||
| #include "nouveau_hw.h" | ||||
| #include "nouveau_fb.h" | ||||
| #include "nouveau_fbcon.h" | ||||
| @ -384,6 +385,21 @@ nouveau_pci_resume(struct pci_dev *pdev) | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static struct drm_ioctl_desc nouveau_ioctls[] = { | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH), | ||||
| 	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH), | ||||
| }; | ||||
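Editor's note: each DRM_IOCTL_DEF_DRV entry lands at the index matching its command number, which is what lets driver.num_ioctls below be a simple ARRAY_SIZE(). Approximately, and treating the exact fields as an assumption about the drm core of this period:

	#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags)			\
		[DRM_IOCTL_NR(DRM_##ioctl)] = {				\
			.cmd = DRM_##ioctl,				\
			.func = _func,					\
			.flags = _flags,				\
			.cmd_drv = DRM_IOCTL_##ioctl,			\
		}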
| 
 | ||||
| static const struct file_operations nouveau_driver_fops = { | ||||
| 	.owner = THIS_MODULE, | ||||
| 	.open = drm_open, | ||||
| @ -422,7 +438,6 @@ static struct drm_driver driver = { | ||||
| 	.get_vblank_counter = drm_vblank_count, | ||||
| 	.enable_vblank = nouveau_vblank_enable, | ||||
| 	.disable_vblank = nouveau_vblank_disable, | ||||
| 	.reclaim_buffers = drm_core_reclaim_buffers, | ||||
| 	.ioctls = nouveau_ioctls, | ||||
| 	.fops = &nouveau_driver_fops, | ||||
| 
 | ||||
| @ -463,7 +478,7 @@ static struct pci_driver nouveau_pci_driver = { | ||||
| 
 | ||||
| static int __init nouveau_init(void) | ||||
| { | ||||
| 	driver.num_ioctls = nouveau_max_ioctl; | ||||
| 	driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls); | ||||
| 
 | ||||
| 	if (nouveau_modeset == -1) { | ||||
| #ifdef CONFIG_VGA_CONSOLE | ||||
|  | ||||
| @ -689,8 +689,6 @@ struct drm_nouveau_private { | ||||
| 	void (*irq_handler[32])(struct drm_device *); | ||||
| 	bool msi_enabled; | ||||
| 
 | ||||
| 	struct list_head vbl_waiting; | ||||
| 
 | ||||
| 	struct { | ||||
| 		struct drm_global_reference mem_global_ref; | ||||
| 		struct ttm_bo_global_ref bo_global_ref; | ||||
| @ -872,10 +870,6 @@ extern int  nouveau_load(struct drm_device *, unsigned long flags); | ||||
| extern int  nouveau_firstopen(struct drm_device *); | ||||
| extern void nouveau_lastclose(struct drm_device *); | ||||
| extern int  nouveau_unload(struct drm_device *); | ||||
| extern int  nouveau_ioctl_getparam(struct drm_device *, void *data, | ||||
| 				   struct drm_file *); | ||||
| extern int  nouveau_ioctl_setparam(struct drm_device *, void *data, | ||||
| 				   struct drm_file *); | ||||
| extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout, | ||||
| 			    uint32_t reg, uint32_t mask, uint32_t val); | ||||
| extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout, | ||||
| @ -914,15 +908,8 @@ extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); | ||||
| extern int  nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, | ||||
| 				   int count, uint32_t start, uint32_t end, | ||||
| 				   uint32_t *offset); | ||||
| extern int  nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); | ||||
| extern int  nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, | ||||
| 					 struct drm_file *); | ||||
| extern int  nouveau_ioctl_notifier_free(struct drm_device *, void *data, | ||||
| 					struct drm_file *); | ||||
| 
 | ||||
| /* nouveau_channel.c */ | ||||
| extern struct drm_ioctl_desc nouveau_ioctls[]; | ||||
| extern int nouveau_max_ioctl; | ||||
| extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *); | ||||
| extern int  nouveau_channel_alloc(struct drm_device *dev, | ||||
| 				  struct nouveau_channel **chan, | ||||
| @ -938,7 +925,7 @@ extern void nouveau_channel_ref(struct nouveau_channel *chan, | ||||
| 				struct nouveau_channel **pchan); | ||||
| extern int  nouveau_channel_idle(struct nouveau_channel *chan); | ||||
| 
 | ||||
| /* nouveau_object.c */ | ||||
| /* nouveau_gpuobj.c */ | ||||
| #define NVOBJ_ENGINE_ADD(d, e, p) do {                                         \ | ||||
| 	struct drm_nouveau_private *dev_priv = (d)->dev_private;               \ | ||||
| 	dev_priv->eng[NVOBJ_ENGINE_##e] = (p);                                 \ | ||||
| @ -993,10 +980,6 @@ extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base, | ||||
| extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset, | ||||
| 				 int class, u64 base, u64 size, int target, | ||||
| 				 int access, u32 type, u32 comp); | ||||
| extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, | ||||
| 				     struct drm_file *); | ||||
| extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, | ||||
| 				     struct drm_file *); | ||||
| 
 | ||||
| /* nouveau_irq.c */ | ||||
| extern int         nouveau_irq_init(struct drm_device *); | ||||
|  | ||||
| @ -207,8 +207,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, | ||||
| 	struct nouveau_bo *nvbo = NULL; | ||||
| 	int ret = 0; | ||||
| 
 | ||||
| 	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) | ||||
| 		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping; | ||||
| 	dev_priv->ttm.bdev.dev_mapping = dev->dev_mapping; | ||||
| 
 | ||||
| 	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) { | ||||
| 		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags); | ||||
| @ -342,6 +341,7 @@ retry: | ||||
| 		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) { | ||||
| 			NV_ERROR(dev, "multiple instances of buffer %d on " | ||||
| 				      "validation list\n", b->handle); | ||||
| 			drm_gem_object_unreference_unlocked(gem); | ||||
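| 			/* drop the reference taken by the lookup before bailing */ | ||||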
| 			validate_fini(op, NULL); | ||||
| 			return -EINVAL; | ||||
| 		} | ||||
|  | ||||
| @ -758,66 +758,6 @@ nouveau_gpuobj_resume(struct drm_device *dev) | ||||
| 	dev_priv->engine.instmem.flush(dev); | ||||
| } | ||||
| 
 | ||||
| int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, | ||||
| 			      struct drm_file *file_priv) | ||||
| { | ||||
| 	struct drm_nouveau_grobj_alloc *init = data; | ||||
| 	struct nouveau_channel *chan; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	if (init->handle == ~0) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	/* compatibility with userspace that assumes 506e for all chipsets */ | ||||
| 	if (init->class == 0x506e) { | ||||
| 		init->class = nouveau_software_class(dev); | ||||
| 		if (init->class == 0x906e) | ||||
| 			return 0; | ||||
| 	} else | ||||
| 	if (init->class == 0x906e) { | ||||
| 		NV_ERROR(dev, "906e not supported yet\n"); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 
 | ||||
| 	chan = nouveau_channel_get(file_priv, init->channel); | ||||
| 	if (IS_ERR(chan)) | ||||
| 		return PTR_ERR(chan); | ||||
| 
 | ||||
| 	if (nouveau_ramht_find(chan, init->handle)) { | ||||
| 		ret = -EEXIST; | ||||
| 		goto out; | ||||
| 	} | ||||
| 
 | ||||
| 	ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class); | ||||
| 	if (ret) { | ||||
| 		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", | ||||
| 			 ret, init->channel, init->handle); | ||||
| 	} | ||||
| 
 | ||||
| out: | ||||
| 	nouveau_channel_put(&chan); | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, | ||||
| 			      struct drm_file *file_priv) | ||||
| { | ||||
| 	struct drm_nouveau_gpuobj_free *objfree = data; | ||||
| 	struct nouveau_channel *chan; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	chan = nouveau_channel_get(file_priv, objfree->channel); | ||||
| 	if (IS_ERR(chan)) | ||||
| 		return PTR_ERR(chan); | ||||
| 
 | ||||
| 	/* Synchronize with the user channel */ | ||||
| 	nouveau_channel_idle(chan); | ||||
| 
 | ||||
| 	ret = nouveau_ramht_remove(chan, objfree->handle); | ||||
| 	nouveau_channel_put(&chan); | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| u32 | ||||
| nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset) | ||||
| { | ||||
| @ -41,12 +41,8 @@ | ||||
| void | ||||
| nouveau_irq_preinstall(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_nouveau_private *dev_priv = dev->dev_private; | ||||
| 
 | ||||
| 	/* Master disable */ | ||||
| 	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); | ||||
| 
 | ||||
| 	INIT_LIST_HEAD(&dev_priv->vbl_waiting); | ||||
| } | ||||
| 
 | ||||
| int | ||||
|  | ||||
| @ -161,44 +161,3 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, | ||||
| 	*b_offset = mem->start; | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| int | ||||
| nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset) | ||||
| { | ||||
| 	if (!nobj || nobj->dtor != nouveau_notifier_gpuobj_dtor) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	if (poffset) { | ||||
| 		struct drm_mm_node *mem = nobj->priv; | ||||
| 
 | ||||
| 		if (*poffset >= mem->size) | ||||
| 			return false; | ||||
| 
 | ||||
| 		*poffset += mem->start; | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| int | ||||
| nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, | ||||
| 			     struct drm_file *file_priv) | ||||
| { | ||||
| 	struct drm_nouveau_private *dev_priv = dev->dev_private; | ||||
| 	struct drm_nouveau_notifierobj_alloc *na = data; | ||||
| 	struct nouveau_channel *chan; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	/* completely unnecessary for these chipsets... */ | ||||
| 	if (unlikely(dev_priv->card_type >= NV_C0)) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	chan = nouveau_channel_get(file_priv, na->channel); | ||||
| 	if (IS_ERR(chan)) | ||||
| 		return PTR_ERR(chan); | ||||
| 
 | ||||
| 	ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000, | ||||
| 				     &na->offset); | ||||
| 	nouveau_channel_put(&chan); | ||||
| 	return ret; | ||||
| } | ||||
|  | ||||
| @ -4,46 +4,33 @@ | ||||
| struct nouveau_software_priv { | ||||
| 	struct nouveau_exec_engine base; | ||||
| 	struct list_head vblank; | ||||
| 	spinlock_t peephole_lock; | ||||
| }; | ||||
| 
 | ||||
| struct nouveau_software_chan { | ||||
| 	struct list_head flip; | ||||
| 	struct { | ||||
| 		struct list_head list; | ||||
| 		struct nouveau_bo *bo; | ||||
| 		u32 channel; | ||||
| 		u32 ctxdma; | ||||
| 		u32 offset; | ||||
| 		u32 value; | ||||
| 		u32 head; | ||||
| 	} vblank; | ||||
| }; | ||||
| 
 | ||||
| static inline void | ||||
| nouveau_software_vblank(struct drm_device *dev, int crtc) | ||||
| { | ||||
| 	struct nouveau_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW); | ||||
| 	struct nouveau_software_chan *pch, *tmp; | ||||
| 
 | ||||
| 	list_for_each_entry_safe(pch, tmp, &psw->vblank, vblank.list) { | ||||
| 		if (pch->vblank.head != crtc) | ||||
| 			continue; | ||||
| 
 | ||||
| 		nouveau_bo_wr32(pch->vblank.bo, pch->vblank.offset, | ||||
| 						pch->vblank.value); | ||||
| 		list_del(&pch->vblank.list); | ||||
| 		drm_vblank_put(dev, crtc); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static inline void | ||||
| nouveau_software_context_new(struct nouveau_software_chan *pch) | ||||
| { | ||||
| 	INIT_LIST_HEAD(&pch->flip); | ||||
| 	INIT_LIST_HEAD(&pch->vblank.list); | ||||
| } | ||||
| 
 | ||||
| static inline void | ||||
| nouveau_software_create(struct nouveau_software_priv *psw) | ||||
| { | ||||
| 	INIT_LIST_HEAD(&psw->vblank); | ||||
| 	spin_lock_init(&psw->peephole_lock); | ||||
| } | ||||
| 
 | ||||
| static inline u16 | ||||
|  | ||||
| @ -1234,80 +1234,6 @@ int nouveau_unload(struct drm_device *dev) | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| int nouveau_ioctl_getparam(struct drm_device *dev, void *data, | ||||
| 						struct drm_file *file_priv) | ||||
| { | ||||
| 	struct drm_nouveau_private *dev_priv = dev->dev_private; | ||||
| 	struct drm_nouveau_getparam *getparam = data; | ||||
| 
 | ||||
| 	switch (getparam->param) { | ||||
| 	case NOUVEAU_GETPARAM_CHIPSET_ID: | ||||
| 		getparam->value = dev_priv->chipset; | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_PCI_VENDOR: | ||||
| 		getparam->value = dev->pci_vendor; | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_PCI_DEVICE: | ||||
| 		getparam->value = dev->pci_device; | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_BUS_TYPE: | ||||
| 		if (drm_pci_device_is_agp(dev)) | ||||
| 			getparam->value = NV_AGP; | ||||
| 		else if (pci_is_pcie(dev->pdev)) | ||||
| 			getparam->value = NV_PCIE; | ||||
| 		else | ||||
| 			getparam->value = NV_PCI; | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_FB_SIZE: | ||||
| 		getparam->value = dev_priv->fb_available_size; | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_AGP_SIZE: | ||||
| 		getparam->value = dev_priv->gart_info.aper_size; | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_VM_VRAM_BASE: | ||||
| 		getparam->value = 0; /* deprecated */ | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_PTIMER_TIME: | ||||
| 		getparam->value = dev_priv->engine.timer.read(dev); | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_HAS_BO_USAGE: | ||||
| 		getparam->value = 1; | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_HAS_PAGEFLIP: | ||||
| 		getparam->value = 1; | ||||
| 		break; | ||||
| 	case NOUVEAU_GETPARAM_GRAPH_UNITS: | ||||
| 		/* NV40 and NV50 versions are quite different, but register
 | ||||
| 		 * address is the same. User is supposed to know the card | ||||
| 		 * family anyway... */ | ||||
| 		if (dev_priv->chipset >= 0x40) { | ||||
| 			getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS); | ||||
| 			break; | ||||
| 		} | ||||
| 		/* FALLTHRU */ | ||||
| 	default: | ||||
| 		NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| int | ||||
| nouveau_ioctl_setparam(struct drm_device *dev, void *data, | ||||
| 		       struct drm_file *file_priv) | ||||
| { | ||||
| 	struct drm_nouveau_setparam *setparam = data; | ||||
| 
 | ||||
| 	switch (setparam->param) { | ||||
| 	default: | ||||
| 		NV_DEBUG(dev, "unknown parameter %lld\n", setparam->param); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| /* Wait until (value(reg) & mask) == val, up until timeout has hit */ | ||||
| bool | ||||
| nouveau_wait_eq(struct drm_device *dev, uint64_t timeout, | ||||
|  | ||||
Some files were not shown because too many files have changed in this diff.