diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 24952fdc7e40..dd38bfbefd1f 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -144,6 +144,7 @@ void clflush_cache_range(void *vaddr, unsigned int size)
 
     mb();
 }
+EXPORT_SYMBOL_GPL(clflush_cache_range);
 
 static void __cpa_flush_all(void *arg)
 {
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index d295bdccc09c..9335b87c5174 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -115,6 +115,9 @@ static const struct file_operations acpi_button_state_fops = {
     .release = single_release,
 };
 
+static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
+static struct acpi_device *lid_device;
+
 /* --------------------------------------------------------------------------
                               FS Interface (/proc)
    -------------------------------------------------------------------------- */
@@ -231,11 +234,38 @@ static int acpi_button_remove_fs(struct acpi_device *device)
 /* --------------------------------------------------------------------------
                                 Driver Interface
    -------------------------------------------------------------------------- */
+int acpi_lid_notifier_register(struct notifier_block *nb)
+{
+    return blocking_notifier_chain_register(&acpi_lid_notifier, nb);
+}
+EXPORT_SYMBOL(acpi_lid_notifier_register);
+
+int acpi_lid_notifier_unregister(struct notifier_block *nb)
+{
+    return blocking_notifier_chain_unregister(&acpi_lid_notifier, nb);
+}
+EXPORT_SYMBOL(acpi_lid_notifier_unregister);
+
+int acpi_lid_open(void)
+{
+    acpi_status status;
+    unsigned long long state;
+
+    status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL,
+                                   &state);
+    if (ACPI_FAILURE(status))
+        return -ENODEV;
+
+    return !!state;
+}
+EXPORT_SYMBOL(acpi_lid_open);
+
 static int acpi_lid_send_state(struct acpi_device *device)
 {
     struct acpi_button *button = acpi_driver_data(device);
     unsigned long long state;
     acpi_status status;
+    int ret;
 
     status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state);
     if (ACPI_FAILURE(status))
@@ -244,7 +274,12 @@ static int acpi_lid_send_state(struct acpi_device *device)
     /* input layer checks if event is redundant */
     input_report_switch(button->input, SW_LID, !state);
     input_sync(button->input);
-    return 0;
+
+    ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
+    if (ret == NOTIFY_DONE)
+        ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
+                                           device);
+    return ret;
 }
 
 static void acpi_button_notify(struct acpi_device *device, u32 event)
@@ -366,8 +401,14 @@ static int acpi_button_add(struct acpi_device *device)
     error = input_register_device(input);
     if (error)
         goto err_remove_fs;
-    if (button->type == ACPI_BUTTON_TYPE_LID)
+    if (button->type == ACPI_BUTTON_TYPE_LID) {
         acpi_lid_send_state(device);
+        /*
+         * This assumes there's only one lid device, or if there are
+         * more we only care about the last one...
+         */
+        lid_device = device;
+    }
 
     if (device->wakeup.flags.valid) {
         /* Button's GPE is run-wake GPE */
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 1540e693d91e..4068467ce7b9 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -46,6 +46,8 @@
 #define PCI_DEVICE_ID_INTEL_Q35_IG      0x29B2
 #define PCI_DEVICE_ID_INTEL_Q33_HB      0x29D0
 #define PCI_DEVICE_ID_INTEL_Q33_IG      0x29D2
+#define PCI_DEVICE_ID_INTEL_B43_HB      0x2E40
+#define PCI_DEVICE_ID_INTEL_B43_IG      0x2E42
 #define PCI_DEVICE_ID_INTEL_GM45_HB     0x2A40
 #define PCI_DEVICE_ID_INTEL_GM45_IG     0x2A42
 #define PCI_DEVICE_ID_INTEL_IGD_E_HB    0x2E00
@@ -91,6 +93,7 @@
         agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
         agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
         agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
+        agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
         agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
         agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
         agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB)
@@ -804,23 +807,39 @@ static void intel_i830_setup_flush(void)
     if (!intel_private.i8xx_page)
         return;
 
-    /* make page uncached */
-    map_page_into_agp(intel_private.i8xx_page);
-
     intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
     if (!intel_private.i8xx_flush_page)
         intel_i830_fini_flush();
 }
 
+static void
+do_wbinvd(void *null)
+{
+    wbinvd();
+}
+
+/* The chipset_flush interface needs to get data that has already been
+ * flushed out of the CPU all the way out to main memory, because the GPU
+ * doesn't snoop those buffers.
+ *
+ * The 8xx series doesn't have the same lovely interface for flushing the
+ * chipset write buffers that the later chips do. According to the 865
+ * specs, it's 64 octwords, or 1KB. So, to get those previous things in
+ * that buffer out, we just fill 1KB and clflush it out, on the assumption
+ * that it'll push whatever was in there out. It appears to work.
+ */
 static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
 {
     unsigned int *pg = intel_private.i8xx_flush_page;
-    int i;
 
-    for (i = 0; i < 256; i += 2)
-        *(pg + i) = i;
+    memset(pg, 0, 1024);
 
-    wmb();
+    if (cpu_has_clflush) {
+        clflush_cache_range(pg, 1024);
+    } else {
+        if (on_each_cpu(do_wbinvd, NULL, 1) != 0)
+            printk(KERN_ERR "Timed out waiting for cache flush.\n");
+    }
 }
 
 /* The intel i830 automatically initializes the agp aperture during POST.
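
The button.c changes above export both a push interface (the blocking notifier chain) and a pull interface (acpi_lid_open()) for lid state. Below is a minimal sketch of a consumer module, assuming the new declarations land in a header such as <acpi/button.h>, which this diff does not show; the i915 driver attaches exactly this kind of notifier_block through the lid_notifier field it grows in i915_drv.h further down. Returning NOTIFY_OK is deliberate: acpi_lid_send_state() retries the chain once when it comes back NOTIFY_DONE, i.e. when no listener claimed the event.

/* Illustrative only, not part of the patch. Assumes the acpi_lid_*
 * declarations are available from <acpi/button.h>. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <acpi/button.h>

static int lid_demo_event(struct notifier_block *nb, unsigned long state,
                          void *data)
{
    /* "state" carries the _LID value: 0 = closed, non-zero = open */
    printk(KERN_INFO "lid-demo: lid %s\n", state ? "opened" : "closed");
    return NOTIFY_OK;
}

static struct notifier_block lid_demo_nb = {
    .notifier_call = lid_demo_event,
};

static int __init lid_demo_init(void)
{
    int open = acpi_lid_open();  /* < 0 means _LID could not be evaluated */

    if (open >= 0)
        printk(KERN_INFO "lid-demo: lid starts %s\n",
               open ? "open" : "closed");
    return acpi_lid_notifier_register(&lid_demo_nb);
}

static void __exit lid_demo_exit(void)
{
    acpi_lid_notifier_unregister(&lid_demo_nb);
}

module_init(lid_demo_init);
module_exit(lid_demo_exit);
MODULE_LICENSE("GPL");
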
@@ -1341,6 +1360,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) case PCI_DEVICE_ID_INTEL_Q45_HB: case PCI_DEVICE_ID_INTEL_G45_HB: case PCI_DEVICE_ID_INTEL_G41_HB: + case PCI_DEVICE_ID_INTEL_B43_HB: case PCI_DEVICE_ID_INTEL_IGDNG_D_HB: case PCI_DEVICE_ID_INTEL_IGDNG_M_HB: case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB: @@ -2335,6 +2355,8 @@ static const struct intel_driver_description { "Q45/Q43", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0, "G45/G43", NULL, &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0, + "B43", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, "G41", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0, @@ -2535,6 +2557,7 @@ static struct pci_device_id agp_intel_pci_table[] = { ID(PCI_DEVICE_ID_INTEL_Q45_HB), ID(PCI_DEVICE_ID_INTEL_G45_HB), ID(PCI_DEVICE_ID_INTEL_G41_HB), + ID(PCI_DEVICE_ID_INTEL_B43_HB), ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB), ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB), ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB), diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index e4d971c8b9d0..f831ea159291 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -102,6 +102,7 @@ config DRM_I915 select BACKLIGHT_CLASS_DEVICE if ACPI select INPUT if ACPI select ACPI_VIDEO if ACPI + select ACPI_BUTTON if ACPI help Choose this option if you have a system that has Intel 830M, 845G, 852GM, 855GM 865G or 915G integrated graphics. If M is selected, the diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 230c9ffdd5e9..80391995bdec 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -142,6 +142,19 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size) if (IS_ERR(obj->filp)) goto free; + /* Basically we want to disable the OOM killer and handle ENOMEM + * ourselves by sacrificing pages from cached buffers. + * XXX shmem_file_[gs]et_gfp_mask() + */ + mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, + GFP_HIGHUSER | + __GFP_COLD | + __GFP_FS | + __GFP_RECLAIMABLE | + __GFP_NORETRY | + __GFP_NOWARN | + __GFP_NOMEMALLOC); + kref_init(&obj->refcount); kref_init(&obj->handlecount); obj->size = size; diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 5269dfa5f620..fa7b9be096bc 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -9,6 +9,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ i915_gem.o \ i915_gem_debug.o \ i915_gem_tiling.o \ + i915_trace_points.o \ intel_display.o \ intel_crt.o \ intel_lvds.o \ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 1e3bdcee863c..f8ce9a3a420d 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -96,11 +96,13 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) { struct drm_gem_object *obj = obj_priv->obj; - seq_printf(m, " %p: %s %08x %08x %d", + seq_printf(m, " %p: %s %8zd %08x %08x %d %s", obj, get_pin_flag(obj_priv), + obj->size, obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); + obj_priv->last_rendering_seqno, + obj_priv->dirty ? 
"dirty" : ""); if (obj->name) seq_printf(m, " (name: %d)", obj->name); diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 5a49a1867b35..45d507ebd3ff 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -33,6 +33,7 @@ #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" +#include "i915_trace.h" #include /* Really want an OS-independent resettable timer. Would like to have @@ -50,14 +51,18 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller) u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR; int i; + trace_i915_ring_wait_begin (dev); + for (i = 0; i < 100000; i++) { ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; acthd = I915_READ(acthd_reg); ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) ring->space += ring->Size; - if (ring->space >= n) + if (ring->space >= n) { + trace_i915_ring_wait_end (dev); return 0; + } if (dev->primary->master) { struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; @@ -77,6 +82,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller) } + trace_i915_ring_wait_end (dev); return -EBUSY; } @@ -922,7 +928,8 @@ static int i915_get_bridge_dev(struct drm_device *dev) * how much was set aside so we can use it for our own purposes. */ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, - uint32_t *preallocated_size) + uint32_t *preallocated_size, + uint32_t *start) { struct drm_i915_private *dev_priv = dev->dev_private; u16 tmp = 0; @@ -1009,10 +1016,159 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, return -1; } *preallocated_size = stolen - overhead; + *start = overhead; return 0; } +#define PTE_ADDRESS_MASK 0xfffff000 +#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */ +#define PTE_MAPPING_TYPE_UNCACHED (0 << 1) +#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */ +#define PTE_MAPPING_TYPE_CACHED (3 << 1) +#define PTE_MAPPING_TYPE_MASK (3 << 1) +#define PTE_VALID (1 << 0) + +/** + * i915_gtt_to_phys - take a GTT address and turn it into a physical one + * @dev: drm device + * @gtt_addr: address to translate + * + * Some chip functions require allocations from stolen space but need the + * physical address of the memory in question. We use this routine + * to get a physical address suitable for register programming from a given + * GTT address. + */ +static unsigned long i915_gtt_to_phys(struct drm_device *dev, + unsigned long gtt_addr) +{ + unsigned long *gtt; + unsigned long entry, phys; + int gtt_bar = IS_I9XX(dev) ? 0 : 1; + int gtt_offset, gtt_size; + + if (IS_I965G(dev)) { + if (IS_G4X(dev) || IS_IGDNG(dev)) { + gtt_offset = 2*1024*1024; + gtt_size = 2*1024*1024; + } else { + gtt_offset = 512*1024; + gtt_size = 512*1024; + } + } else { + gtt_bar = 3; + gtt_offset = 0; + gtt_size = pci_resource_len(dev->pdev, gtt_bar); + } + + gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset, + gtt_size); + if (!gtt) { + DRM_ERROR("ioremap of GTT failed\n"); + return 0; + } + + entry = *(volatile u32 *)(gtt + (gtt_addr / 1024)); + + DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry); + + /* Mask out these reserved bits on this hardware. */ + if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) || + IS_I945G(dev) || IS_I945GM(dev)) { + entry &= ~PTE_ADDRESS_MASK_HIGH; + } + + /* If it's not a mapping type we know, then bail. 
*/ + if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED && + (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) { + iounmap(gtt); + return 0; + } + + if (!(entry & PTE_VALID)) { + DRM_ERROR("bad GTT entry in stolen space\n"); + iounmap(gtt); + return 0; + } + + iounmap(gtt); + + phys =(entry & PTE_ADDRESS_MASK) | + ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4)); + + DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys); + + return phys; +} + +static void i915_warn_stolen(struct drm_device *dev) +{ + DRM_ERROR("not enough stolen space for compressed buffer, disabling\n"); + DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n"); +} + +static void i915_setup_compression(struct drm_device *dev, int size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_mm_node *compressed_fb, *compressed_llb; + unsigned long cfb_base, ll_base; + + /* Leave 1M for line length buffer & misc. */ + compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); + if (!compressed_fb) { + i915_warn_stolen(dev); + return; + } + + compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); + if (!compressed_fb) { + i915_warn_stolen(dev); + return; + } + + cfb_base = i915_gtt_to_phys(dev, compressed_fb->start); + if (!cfb_base) { + DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); + drm_mm_put_block(compressed_fb); + } + + if (!IS_GM45(dev)) { + compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096, + 4096, 0); + if (!compressed_llb) { + i915_warn_stolen(dev); + return; + } + + compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096); + if (!compressed_llb) { + i915_warn_stolen(dev); + return; + } + + ll_base = i915_gtt_to_phys(dev, compressed_llb->start); + if (!ll_base) { + DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); + drm_mm_put_block(compressed_fb); + drm_mm_put_block(compressed_llb); + } + } + + dev_priv->cfb_size = size; + + if (IS_GM45(dev)) { + g4x_disable_fbc(dev); + I915_WRITE(DPFC_CB_BASE, compressed_fb->start); + } else { + i8xx_disable_fbc(dev); + I915_WRITE(FBC_CFB_BASE, cfb_base); + I915_WRITE(FBC_LL_BASE, ll_base); + } + + DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, + ll_base, size >> 20); +} + /* true = enable decode, false = disable decoder */ static unsigned int i915_vga_set_decode(void *cookie, bool state) { @@ -1027,6 +1183,7 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state) } static int i915_load_modeset_init(struct drm_device *dev, + unsigned long prealloc_start, unsigned long prealloc_size, unsigned long agp_size) { @@ -1047,6 +1204,10 @@ static int i915_load_modeset_init(struct drm_device *dev, /* Basic memrange allocator for stolen space (aka vram) */ drm_mm_init(&dev_priv->vram, 0, prealloc_size); + DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024)); + + /* We're off and running w/KMS */ + dev_priv->mm.suspended = 0; /* Let GEM Manage from end of prealloc space to end of aperture. * @@ -1059,10 +1220,25 @@ static int i915_load_modeset_init(struct drm_device *dev, */ i915_gem_do_init(dev, prealloc_size, agp_size - 4096); + mutex_lock(&dev->struct_mutex); ret = i915_gem_init_ringbuffer(dev); + mutex_unlock(&dev->struct_mutex); if (ret) goto out; + /* Try to set up FBC with a reasonable compressed buffer size */ + if (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev) || IS_GM45(dev)) && + i915_powersave) { + int cfb_size; + + /* Try to get an 8M buffer... 
*/ + if (prealloc_size > (9*1024*1024)) + cfb_size = 8*1024*1024; + else /* fall back to 7/8 of the stolen space */ + cfb_size = prealloc_size * 7 / 8; + i915_setup_compression(dev, cfb_size); + } + /* Allow hardware batchbuffers unless told otherwise. */ dev_priv->allow_batchbuffer = 1; @@ -1180,7 +1356,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) struct drm_i915_private *dev_priv = dev->dev_private; resource_size_t base, size; int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1; - uint32_t agp_size, prealloc_size; + uint32_t agp_size, prealloc_size, prealloc_start; /* i915 has 4 more counters */ dev->counters += 4; @@ -1234,7 +1410,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) "performance may suffer.\n"); } - ret = i915_probe_agp(dev, &agp_size, &prealloc_size); + ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start); if (ret) goto out_iomapfree; @@ -1300,8 +1476,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) return ret; } + /* Start out suspended */ + dev_priv->mm.suspended = 1; + if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = i915_load_modeset_init(dev, prealloc_size, agp_size); + ret = i915_load_modeset_init(dev, prealloc_start, + prealloc_size, agp_size); if (ret < 0) { DRM_ERROR("failed to init modeset\n"); goto out_workqueue_free; @@ -1313,6 +1493,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (!IS_IGDNG(dev)) intel_opregion_init(dev, 0); + setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, + (unsigned long) dev); return 0; out_workqueue_free: @@ -1333,6 +1515,7 @@ int i915_driver_unload(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; destroy_workqueue(dev_priv->wq); + del_timer_sync(&dev_priv->hangcheck_timer); io_mapping_free(dev_priv->mm.gtt_mapping); if (dev_priv->mm.gtt_mtrr >= 0) { @@ -1472,6 +1655,7 @@ struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0), DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), + DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index dbe568c9327b..b93814c0d3e2 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -89,6 +89,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) pci_set_power_state(dev->pdev, PCI_D3hot); } + dev_priv->suspended = 1; + return 0; } @@ -97,8 +99,6 @@ static int i915_resume(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int ret = 0; - pci_set_power_state(dev->pdev, PCI_D0); - pci_restore_state(dev->pdev); if (pci_enable_device(dev->pdev)) return -1; pci_set_master(dev->pdev); @@ -124,9 +124,135 @@ static int i915_resume(struct drm_device *dev) drm_helper_resume_force_mode(dev); } + dev_priv->suspended = 0; + return ret; } +/** + * i965_reset - reset chip after a hang + * @dev: drm device to reset + * @flags: reset domains + * + * Reset the chip. Useful if a hang is detected. Returns zero on successful + * reset or otherwise an error code. 
+ * + * Procedure is fairly simple: + * - reset the chip using the reset reg + * - re-init context state + * - re-init hardware status page + * - re-init ring buffer + * - re-init interrupt state + * - re-init display + */ +int i965_reset(struct drm_device *dev, u8 flags) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long timeout; + u8 gdrst; + /* + * We really should only reset the display subsystem if we actually + * need to + */ + bool need_display = true; + + mutex_lock(&dev->struct_mutex); + + /* + * Clear request list + */ + i915_gem_retire_requests(dev); + + if (need_display) + i915_save_display(dev); + + if (IS_I965G(dev) || IS_G4X(dev)) { + /* + * Set the domains we want to reset, then the reset bit (bit 0). + * Clear the reset bit after a while and wait for hardware status + * bit (bit 1) to be set + */ + pci_read_config_byte(dev->pdev, GDRST, &gdrst); + pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0)); + udelay(50); + pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe); + + /* ...we don't want to loop forever though, 500ms should be plenty */ + timeout = jiffies + msecs_to_jiffies(500); + do { + udelay(100); + pci_read_config_byte(dev->pdev, GDRST, &gdrst); + } while ((gdrst & 0x1) && time_after(timeout, jiffies)); + + if (gdrst & 0x1) { + WARN(true, "i915: Failed to reset chip\n"); + mutex_unlock(&dev->struct_mutex); + return -EIO; + } + } else { + DRM_ERROR("Error occurred. Don't know how to reset this chip.\n"); + return -ENODEV; + } + + /* Ok, now get things going again... */ + + /* + * Everything depends on having the GTT running, so we need to start + * there. Fortunately we don't need to do this unless we reset the + * chip at a PCI level. + * + * Next we need to restore the context, but we don't use those + * yet either... + * + * Ring buffer needs to be re-initialized in the KMS case, or if X + * was running at the time of the reset (i.e. we weren't VT + * switched away). + */ + if (drm_core_check_feature(dev, DRIVER_MODESET) || + !dev_priv->mm.suspended) { + drm_i915_ring_buffer_t *ring = &dev_priv->ring; + struct drm_gem_object *obj = ring->ring_obj; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + dev_priv->mm.suspended = 0; + + /* Stop the ring if it's running. */ + I915_WRITE(PRB0_CTL, 0); + I915_WRITE(PRB0_TAIL, 0); + I915_WRITE(PRB0_HEAD, 0); + + /* Initialize the ring. */ + I915_WRITE(PRB0_START, obj_priv->gtt_offset); + I915_WRITE(PRB0_CTL, + ((obj->size - 4096) & RING_NR_PAGES) | + RING_NO_REPORT | + RING_VALID); + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + i915_kernel_lost_context(dev); + else { + ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; + ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; + ring->space = ring->head - (ring->tail + 8); + if (ring->space < 0) + ring->space += ring->Size; + } + + mutex_unlock(&dev->struct_mutex); + drm_irq_uninstall(dev); + drm_irq_install(dev); + mutex_lock(&dev->struct_mutex); + } + + /* + * Display needs restore too... + */ + if (need_display) + i915_restore_display(dev); + + mutex_unlock(&dev->struct_mutex); + return 0; +} + + static int __devinit i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -234,6 +360,8 @@ static int __init i915_init(void) { driver.num_ioctls = i915_max_ioctl; + i915_gem_shrinker_init(); + /* * If CONFIG_DRM_I915_KMS is set, default to KMS unless * explicitly disabled with the module pararmeter. 
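
The GDRST loop in i965_reset() above is a standard bounded-poll idiom: trigger the reset, spin with udelay() until the hardware status bit clears, bound the spin with a jiffies deadline so a dead chip cannot hang the kernel, and re-read the bit one last time to tell success from timeout. A condensed sketch of that idiom follows; read_status is an illustrative stand-in for the pci_read_config_byte() of GDRST and is not part of the patch.

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Poll until bit 0 of the status byte clears, or ~500ms elapse. */
static int wait_for_reset_done(u8 (*read_status)(void))
{
    unsigned long timeout = jiffies + msecs_to_jiffies(500);
    u8 status;

    do {
        udelay(100);
        status = read_status();
    } while ((status & 0x1) && time_after(timeout, jiffies));

    /* Decide from the final read, not from why the loop ended. */
    return (status & 0x1) ? -EIO : 0;
}

The final re-read matters: falling out of the loop alone does not say whether the bit cleared or the deadline expired, which is why the original checks gdrst once more before declaring -EIO.
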
@@ -260,6 +388,7 @@ static int __init i915_init(void) static void __exit i915_exit(void) { + i915_gem_shrinker_exit(); drm_exit(&driver); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a0632f8e76ac..b24b2d145b75 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -48,6 +48,11 @@ enum pipe { PIPE_B, }; +enum plane { + PLANE_A = 0, + PLANE_B, +}; + #define I915_NUM_PIPE 2 /* Interface history: @@ -148,6 +153,23 @@ struct drm_i915_error_state { struct timeval time; }; +struct drm_i915_display_funcs { + void (*dpms)(struct drm_crtc *crtc, int mode); + bool (*fbc_enabled)(struct drm_crtc *crtc); + void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); + void (*disable_fbc)(struct drm_device *dev); + int (*get_display_clock_speed)(struct drm_device *dev); + int (*get_fifo_size)(struct drm_device *dev, int plane); + void (*update_wm)(struct drm_device *dev, int planea_clock, + int planeb_clock, int sr_hdisplay, int pixel_size); + /* clock updates for mode set */ + /* cursor updates */ + /* render clock increase/decrease */ + /* display clock increase/decrease */ + /* pll clock increase/decrease */ + /* clock gating init */ +}; + typedef struct drm_i915_private { struct drm_device *dev; @@ -198,10 +220,21 @@ typedef struct drm_i915_private { unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; int vblank_pipe; + /* For hangcheck timer */ +#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */ + struct timer_list hangcheck_timer; + int hangcheck_count; + uint32_t last_acthd; + bool cursor_needs_physical; struct drm_mm vram; + unsigned long cfb_size; + unsigned long cfb_pitch; + int cfb_fence; + int cfb_plane; + int irq_enabled; struct intel_opregion opregion; @@ -222,6 +255,8 @@ typedef struct drm_i915_private { unsigned int edp_support:1; int lvds_ssc_freq; + struct notifier_block lid_notifier; + int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */ struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ @@ -234,7 +269,11 @@ typedef struct drm_i915_private { struct work_struct error_work; struct workqueue_struct *wq; + /* Display functions */ + struct drm_i915_display_funcs display; + /* Register state */ + bool suspended; u8 saveLBB; u32 saveDSPACNTR; u32 saveDSPBCNTR; @@ -349,6 +388,15 @@ typedef struct drm_i915_private { struct io_mapping *gtt_mapping; int gtt_mtrr; + /** + * Membership on list of all loaded devices, used to evict + * inactive buffers under memory pressure. + * + * Modifications should only be done whilst holding the + * shrink_list_lock spinlock. + */ + struct list_head shrink_list; + /** * List of objects currently involved in rendering from the * ringbuffer. @@ -432,7 +480,7 @@ typedef struct drm_i915_private { * It prevents command submission from occuring and makes * every pending request fail */ - int wedged; + atomic_t wedged; /** Bit 6 swizzling required for X tiling */ uint32_t bit_6_swizzle_x; @@ -491,10 +539,7 @@ struct drm_i915_gem_object { * This is the same as gtt_space->start */ uint32_t gtt_offset; - /** - * Required alignment for the object - */ - uint32_t gtt_alignment; + /** * Fake offset for use by mmap(2) */ @@ -541,6 +586,11 @@ struct drm_i915_gem_object { * in an execbuffer object list. */ int in_execbuffer; + + /** + * Advice: are the backing pages purgeable? 
+ */ + int madv; }; /** @@ -585,6 +635,8 @@ extern int i915_max_ioctl; extern unsigned int i915_fbpercrtc; extern unsigned int i915_powersave; +extern void i915_save_display(struct drm_device *dev); +extern void i915_restore_display(struct drm_device *dev); extern int i915_master_create(struct drm_device *dev, struct drm_master *master); extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); @@ -604,8 +656,10 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, extern int i915_emit_box(struct drm_device *dev, struct drm_clip_rect *boxes, int i, int DR1, int DR4); +extern int i965_reset(struct drm_device *dev, u8 flags); /* i915_irq.c */ +void i915_hangcheck_elapsed(unsigned long data); extern int i915_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int i915_irq_wait(struct drm_device *dev, void *data, @@ -676,6 +730,8 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, @@ -695,6 +751,7 @@ int i915_gem_object_unbind(struct drm_gem_object *obj); void i915_gem_release_mmap(struct drm_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); uint32_t i915_get_gem_seqno(struct drm_device *dev); +bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); void i915_gem_retire_requests(struct drm_device *dev); @@ -720,6 +777,9 @@ int i915_gem_object_get_pages(struct drm_gem_object *obj); void i915_gem_object_put_pages(struct drm_gem_object *obj); void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); +void i915_gem_shrinker_init(void); +void i915_gem_shrinker_exit(void); + /* i915_gem_tiling.c */ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); @@ -767,6 +827,8 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; } extern void intel_modeset_init(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device *dev); extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); +extern void i8xx_disable_fbc(struct drm_device *dev); +extern void g4x_disable_fbc(struct drm_device *dev); /** * Lock test for when it's just for synchronization of ring access. 
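
i915_seqno_passed() is promoted to a shared helper here because the hangcheck timer and the reworked eviction code all need ordering tests on ring sequence numbers, and a naive ">=" gives the wrong answer once the 32-bit seqno wraps. Its signed-difference trick deserves a worked example; the standalone program below (illustrative, not part of the patch) shows it staying correct across the wrap where unsigned comparison fails.

/* cc seqno_demo.c -o seqno_demo && ./seqno_demo */
#include <stdio.h>
#include <stdint.h>

/* Same trick as i915_seqno_passed(): the subtraction wraps modulo 2^32,
 * and the signed reinterpretation is non-negative iff seq1 is at or
 * ahead of seq2 (valid while they stay within 2^31 of each other). */
static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
    return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
    uint32_t old = 0xfffffffbu; /* issued just before the wrap */
    uint32_t new = 5;           /* issued just after the wrap */

    printf("seqno_passed(new, old) = %d (want 1)\n", seqno_passed(new, old));
    printf("seqno_passed(old, new) = %d (want 0)\n", seqno_passed(old, new));
    printf("naive new >= old       = %d (wrong)\n", new >= old);
    return 0;
}
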
@@ -864,6 +926,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); (dev)->pci_device == 0x2E12 || \ (dev)->pci_device == 0x2E22 || \ (dev)->pci_device == 0x2E32 || \ + (dev)->pci_device == 0x2E42 || \ (dev)->pci_device == 0x0042 || \ (dev)->pci_device == 0x0046) @@ -876,6 +939,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); (dev)->pci_device == 0x2E12 || \ (dev)->pci_device == 0x2E22 || \ (dev)->pci_device == 0x2E32 || \ + (dev)->pci_device == 0x2E42 || \ IS_GM45(dev)) #define IS_IGDG(dev) ((dev)->pci_device == 0xa001) @@ -909,12 +973,13 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev)) #define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev)) -#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) +#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev)) /* dsparb controlled by hw only */ #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev)) #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev)) #define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev)) +#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev))) #define PRIMARY_RINGBUFFER_SIZE (128*1024) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c67317112f4a..40727d4c2919 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -29,6 +29,7 @@ #include "drm.h" #include "i915_drm.h" #include "i915_drv.h" +#include "i915_trace.h" #include "intel_drv.h" #include #include @@ -48,11 +49,15 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment); static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); -static int i915_gem_evict_something(struct drm_device *dev); +static int i915_gem_evict_something(struct drm_device *dev, int min_size); +static int i915_gem_evict_from_inactive_list(struct drm_device *dev); static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file_priv); +static LIST_HEAD(shrink_list); +static DEFINE_SPINLOCK(shrink_list_lock); + int i915_gem_do_init(struct drm_device *dev, unsigned long start, unsigned long end) { @@ -316,6 +321,45 @@ fail_unlock: return ret; } +static inline gfp_t +i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj) +{ + return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping); +} + +static inline void +i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp) +{ + mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp); +} + +static int +i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) +{ + int ret; + + ret = i915_gem_object_get_pages(obj); + + /* If we've insufficient memory to map in the pages, attempt + * to make some space by throwing out some old buffers. 
+ */ + if (ret == -ENOMEM) { + struct drm_device *dev = obj->dev; + gfp_t gfp; + + ret = i915_gem_evict_something(dev, obj->size); + if (ret) + return ret; + + gfp = i915_gem_object_get_page_gfp_mask(obj); + i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY); + ret = i915_gem_object_get_pages(obj); + i915_gem_object_set_page_gfp_mask (obj, gfp); + } + + return ret; +} + /** * This is the fallback shmem pread path, which allocates temporary storage * in kernel space to copy_to_user into outside of the struct_mutex, so we @@ -367,8 +411,8 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, mutex_lock(&dev->struct_mutex); - ret = i915_gem_object_get_pages(obj); - if (ret != 0) + ret = i915_gem_object_get_pages_or_evict(obj); + if (ret) goto fail_unlock; ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, @@ -842,8 +886,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, mutex_lock(&dev->struct_mutex); - ret = i915_gem_object_get_pages(obj); - if (ret != 0) + ret = i915_gem_object_get_pages_or_evict(obj); + if (ret) goto fail_unlock; ret = i915_gem_object_set_to_cpu_domain(obj, 1); @@ -1155,28 +1199,22 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Now bind it into the GTT if needed */ mutex_lock(&dev->struct_mutex); if (!obj_priv->gtt_space) { - ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment); - if (ret) { - mutex_unlock(&dev->struct_mutex); - return VM_FAULT_SIGBUS; - } - - ret = i915_gem_object_set_to_gtt_domain(obj, write); - if (ret) { - mutex_unlock(&dev->struct_mutex); - return VM_FAULT_SIGBUS; - } + ret = i915_gem_object_bind_to_gtt(obj, 0); + if (ret) + goto unlock; list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + + ret = i915_gem_object_set_to_gtt_domain(obj, write); + if (ret) + goto unlock; } /* Need a new fence register? 
*/ if (obj_priv->tiling_mode != I915_TILING_NONE) { ret = i915_gem_object_get_fence_reg(obj); - if (ret) { - mutex_unlock(&dev->struct_mutex); - return VM_FAULT_SIGBUS; - } + if (ret) + goto unlock; } pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + @@ -1184,18 +1222,18 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Finally, remap it using the new GTT offset */ ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); - +unlock: mutex_unlock(&dev->struct_mutex); switch (ret) { + case 0: + case -ERESTARTSYS: + return VM_FAULT_NOPAGE; case -ENOMEM: case -EAGAIN: return VM_FAULT_OOM; - case -EFAULT: - case -EINVAL: - return VM_FAULT_SIGBUS; default: - return VM_FAULT_NOPAGE; + return VM_FAULT_SIGBUS; } } @@ -1388,6 +1426,14 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, obj_priv = obj->driver_private; + if (obj_priv->madv != I915_MADV_WILLNEED) { + DRM_ERROR("Attempting to mmap a purgeable buffer\n"); + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + + if (!obj_priv->mmap_offset) { ret = i915_gem_create_mmap_offset(obj); if (ret) { @@ -1399,22 +1445,12 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, args->offset = obj_priv->mmap_offset; - obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj); - - /* Make sure the alignment is correct for fence regs etc */ - if (obj_priv->agp_mem && - (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - /* * Pull it into the GTT so that we have a page list (makes the * initial fault faster and any subsequent flushing possible). */ if (!obj_priv->agp_mem) { - ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment); + ret = i915_gem_object_bind_to_gtt(obj, 0); if (ret) { drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); @@ -1437,6 +1473,7 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) int i; BUG_ON(obj_priv->pages_refcount == 0); + BUG_ON(obj_priv->madv == __I915_MADV_PURGED); if (--obj_priv->pages_refcount != 0) return; @@ -1444,13 +1481,21 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) if (obj_priv->tiling_mode != I915_TILING_NONE) i915_gem_object_save_bit_17_swizzle(obj); - for (i = 0; i < page_count; i++) - if (obj_priv->pages[i] != NULL) { - if (obj_priv->dirty) - set_page_dirty(obj_priv->pages[i]); + if (obj_priv->madv == I915_MADV_DONTNEED) + obj_priv->dirty = 0; + + for (i = 0; i < page_count; i++) { + if (obj_priv->pages[i] == NULL) + break; + + if (obj_priv->dirty) + set_page_dirty(obj_priv->pages[i]); + + if (obj_priv->madv == I915_MADV_WILLNEED) mark_page_accessed(obj_priv->pages[i]); - page_cache_release(obj_priv->pages[i]); - } + + page_cache_release(obj_priv->pages[i]); + } obj_priv->dirty = 0; drm_free_large(obj_priv->pages); @@ -1489,6 +1534,26 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) obj_priv->last_rendering_seqno = 0; } +/* Immediately discard the backing storage */ +static void +i915_gem_object_truncate(struct drm_gem_object *obj) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct inode *inode; + + inode = obj->filp->f_path.dentry->d_inode; + if (inode->i_op->truncate) + inode->i_op->truncate (inode); + + obj_priv->madv = __I915_MADV_PURGED; +} + +static inline int +i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv) +{ + return obj_priv->madv == I915_MADV_DONTNEED; +} + static void 
i915_gem_object_move_to_inactive(struct drm_gem_object *obj) { @@ -1577,15 +1642,24 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, if ((obj->write_domain & flush_domains) == obj->write_domain) { + uint32_t old_write_domain = obj->write_domain; + obj->write_domain = 0; i915_gem_object_move_to_active(obj, seqno); + + trace_i915_gem_object_change_domain(obj, + obj->read_domains, + old_write_domain); } } } - if (was_empty && !dev_priv->mm.suspended) - queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); + if (!dev_priv->mm.suspended) { + mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); + if (was_empty) + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); + } return seqno; } @@ -1623,6 +1697,8 @@ i915_gem_retire_request(struct drm_device *dev, { drm_i915_private_t *dev_priv = dev->dev_private; + trace_i915_gem_request_retire(dev, request->seqno); + /* Move any buffers on the active list that are no longer referenced * by the ringbuffer to the flushing/inactive lists as appropriate. */ @@ -1671,7 +1747,7 @@ out: /** * Returns true if seq1 is later than seq2. */ -static int +bool i915_seqno_passed(uint32_t seq1, uint32_t seq2) { return (int32_t)(seq1 - seq2) >= 0; @@ -1709,7 +1785,7 @@ i915_gem_retire_requests(struct drm_device *dev) retiring_seqno = request->seqno; if (i915_seqno_passed(seqno, retiring_seqno) || - dev_priv->mm.wedged) { + atomic_read(&dev_priv->mm.wedged)) { i915_gem_retire_request(dev, request); list_del(&request->list); @@ -1751,6 +1827,9 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) BUG_ON(seqno == 0); + if (atomic_read(&dev_priv->mm.wedged)) + return -EIO; + if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { if (IS_IGDNG(dev)) ier = I915_READ(DEIER) | I915_READ(GTIER); @@ -1763,16 +1842,20 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) i915_driver_irq_postinstall(dev); } + trace_i915_gem_request_wait_begin(dev, seqno); + dev_priv->mm.waiting_gem_seqno = seqno; i915_user_irq_get(dev); ret = wait_event_interruptible(dev_priv->irq_queue, i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || - dev_priv->mm.wedged); + atomic_read(&dev_priv->mm.wedged)); i915_user_irq_put(dev); dev_priv->mm.waiting_gem_seqno = 0; + + trace_i915_gem_request_wait_end(dev, seqno); } - if (dev_priv->mm.wedged) + if (atomic_read(&dev_priv->mm.wedged)) ret = -EIO; if (ret && ret != -ERESTARTSYS) @@ -1803,6 +1886,8 @@ i915_gem_flush(struct drm_device *dev, DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, invalidate_domains, flush_domains); #endif + trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno, + invalidate_domains, flush_domains); if (flush_domains & I915_GEM_DOMAIN_CPU) drm_agp_chipset_flush(dev); @@ -1915,6 +2000,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj) return -EINVAL; } + /* blow away mappings if mapped through GTT */ + i915_gem_release_mmap(obj); + + if (obj_priv->fence_reg != I915_FENCE_REG_NONE) + i915_gem_clear_fence_reg(obj); + /* Move the object to the CPU domain to ensure that * any possible CPU writes while it's not in the GTT * are flushed when we go to remap it. 
This will @@ -1928,21 +2019,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj) return ret; } + BUG_ON(obj_priv->active); + if (obj_priv->agp_mem != NULL) { drm_unbind_agp(obj_priv->agp_mem); drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); obj_priv->agp_mem = NULL; } - BUG_ON(obj_priv->active); - - /* blow away mappings if mapped through GTT */ - i915_gem_release_mmap(obj); - - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) - i915_gem_clear_fence_reg(obj); - i915_gem_object_put_pages(obj); + BUG_ON(obj_priv->pages_refcount); if (obj_priv->gtt_space) { atomic_dec(&dev->gtt_count); @@ -1956,40 +2042,113 @@ i915_gem_object_unbind(struct drm_gem_object *obj) if (!list_empty(&obj_priv->list)) list_del_init(&obj_priv->list); + if (i915_gem_object_is_purgeable(obj_priv)) + i915_gem_object_truncate(obj); + + trace_i915_gem_object_unbind(obj); + + return 0; +} + +static struct drm_gem_object * +i915_gem_find_inactive_object(struct drm_device *dev, int min_size) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + struct drm_gem_object *best = NULL; + struct drm_gem_object *first = NULL; + + /* Try to find the smallest clean object */ + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { + struct drm_gem_object *obj = obj_priv->obj; + if (obj->size >= min_size) { + if ((!obj_priv->dirty || + i915_gem_object_is_purgeable(obj_priv)) && + (!best || obj->size < best->size)) { + best = obj; + if (best->size == min_size) + return best; + } + if (!first) + first = obj; + } + } + + return best ? best : first; +} + +static int +i915_gem_evict_everything(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + uint32_t seqno; + int ret; + bool lists_empty; + + spin_lock(&dev_priv->mm.active_list_lock); + lists_empty = (list_empty(&dev_priv->mm.inactive_list) && + list_empty(&dev_priv->mm.flushing_list) && + list_empty(&dev_priv->mm.active_list)); + spin_unlock(&dev_priv->mm.active_list_lock); + + if (lists_empty) + return -ENOSPC; + + /* Flush everything (on to the inactive lists) and evict */ + i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); + if (seqno == 0) + return -ENOMEM; + + ret = i915_wait_request(dev, seqno); + if (ret) + return ret; + + ret = i915_gem_evict_from_inactive_list(dev); + if (ret) + return ret; + + spin_lock(&dev_priv->mm.active_list_lock); + lists_empty = (list_empty(&dev_priv->mm.inactive_list) && + list_empty(&dev_priv->mm.flushing_list) && + list_empty(&dev_priv->mm.active_list)); + spin_unlock(&dev_priv->mm.active_list_lock); + BUG_ON(!lists_empty); + return 0; } static int -i915_gem_evict_something(struct drm_device *dev) +i915_gem_evict_something(struct drm_device *dev, int min_size) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; - int ret = 0; + int ret; for (;;) { + i915_gem_retire_requests(dev); + /* If there's an inactive buffer available now, grab it * and be done. 
*/ - if (!list_empty(&dev_priv->mm.inactive_list)) { - obj_priv = list_first_entry(&dev_priv->mm.inactive_list, - struct drm_i915_gem_object, - list); - obj = obj_priv->obj; - BUG_ON(obj_priv->pin_count != 0); + obj = i915_gem_find_inactive_object(dev, min_size); + if (obj) { + struct drm_i915_gem_object *obj_priv; + #if WATCH_LRU DRM_INFO("%s: evicting %p\n", __func__, obj); #endif + obj_priv = obj->driver_private; + BUG_ON(obj_priv->pin_count != 0); BUG_ON(obj_priv->active); /* Wait on the rendering and unbind the buffer. */ - ret = i915_gem_object_unbind(obj); - break; + return i915_gem_object_unbind(obj); } /* If we didn't get anything, but the ring is still processing - * things, wait for one of those things to finish and hopefully - * leave us a buffer to evict. + * things, wait for the next to finish and hopefully leave us + * a buffer to evict. */ if (!list_empty(&dev_priv->mm.request_list)) { struct drm_i915_gem_request *request; @@ -2000,16 +2159,9 @@ i915_gem_evict_something(struct drm_device *dev) ret = i915_wait_request(dev, request->seqno); if (ret) - break; + return ret; - /* if waiting caused an object to become inactive, - * then loop around and wait for it. Otherwise, we - * assume that waiting freed and unbound something, - * so there should now be some space in the GTT - */ - if (!list_empty(&dev_priv->mm.inactive_list)) - continue; - break; + continue; } /* If we didn't have anything on the request list but there @@ -2018,46 +2170,44 @@ i915_gem_evict_something(struct drm_device *dev) * will get moved to inactive. */ if (!list_empty(&dev_priv->mm.flushing_list)) { - obj_priv = list_first_entry(&dev_priv->mm.flushing_list, - struct drm_i915_gem_object, - list); - obj = obj_priv->obj; + struct drm_i915_gem_object *obj_priv; - i915_gem_flush(dev, - obj->write_domain, - obj->write_domain); - i915_add_request(dev, NULL, obj->write_domain); + /* Find an object that we can immediately reuse */ + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { + obj = obj_priv->obj; + if (obj->size >= min_size) + break; - obj = NULL; - continue; + obj = NULL; + } + + if (obj != NULL) { + uint32_t seqno; + + i915_gem_flush(dev, + obj->write_domain, + obj->write_domain); + seqno = i915_add_request(dev, NULL, obj->write_domain); + if (seqno == 0) + return -ENOMEM; + + ret = i915_wait_request(dev, seqno); + if (ret) + return ret; + + continue; + } } - DRM_ERROR("inactive empty %d request empty %d " - "flushing empty %d\n", - list_empty(&dev_priv->mm.inactive_list), - list_empty(&dev_priv->mm.request_list), - list_empty(&dev_priv->mm.flushing_list)); - /* If we didn't do any of the above, there's nothing to be done - * and we just can't fit it in. + /* If we didn't do any of the above, there's no single buffer + * large enough to swap out for the new one, so just evict + * everything and start again. (This should be rare.) 
*/ - return -ENOSPC; + if (!list_empty (&dev_priv->mm.inactive_list)) + return i915_gem_evict_from_inactive_list(dev); + else + return i915_gem_evict_everything(dev); } - return ret; -} - -static int -i915_gem_evict_everything(struct drm_device *dev) -{ - int ret; - - for (;;) { - ret = i915_gem_evict_something(dev); - if (ret != 0) - break; - } - if (ret == -ENOSPC) - return 0; - return ret; } int @@ -2080,7 +2230,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj) BUG_ON(obj_priv->pages != NULL); obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *)); if (obj_priv->pages == NULL) { - DRM_ERROR("Faled to allocate page list\n"); obj_priv->pages_refcount--; return -ENOMEM; } @@ -2091,7 +2240,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj) page = read_mapping_page(mapping, i, NULL); if (IS_ERR(page)) { ret = PTR_ERR(page); - DRM_ERROR("read_mapping_page failed: %d\n", ret); i915_gem_object_put_pages(obj); return ret; } @@ -2328,6 +2476,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) else i830_write_fence_reg(reg); + trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode); + return 0; } @@ -2410,10 +2560,17 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = obj->driver_private; struct drm_mm_node *free_space; - int page_count, ret; + bool retry_alloc = false; + int ret; if (dev_priv->mm.suspended) return -EBUSY; + + if (obj_priv->madv != I915_MADV_WILLNEED) { + DRM_ERROR("Attempting to bind a purgeable object\n"); + return -EINVAL; + } + if (alignment == 0) alignment = i915_gem_get_gtt_alignment(obj); if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) { @@ -2433,30 +2590,16 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) } } if (obj_priv->gtt_space == NULL) { - bool lists_empty; - /* If the gtt is empty and we're still having trouble * fitting our object in, we're out of memory. */ #if WATCH_LRU DRM_INFO("%s: GTT full, evicting something\n", __func__); #endif - spin_lock(&dev_priv->mm.active_list_lock); - lists_empty = (list_empty(&dev_priv->mm.inactive_list) && - list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->mm.active_list)); - spin_unlock(&dev_priv->mm.active_list_lock); - if (lists_empty) { - DRM_ERROR("GTT full, but LRU list empty\n"); - return -ENOSPC; - } - - ret = i915_gem_evict_something(dev); - if (ret != 0) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Failed to evict a buffer %d\n", ret); + ret = i915_gem_evict_something(dev, obj->size); + if (ret) return ret; - } + goto search_free; } @@ -2464,27 +2607,56 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) DRM_INFO("Binding object of size %zd at 0x%08x\n", obj->size, obj_priv->gtt_offset); #endif + if (retry_alloc) { + i915_gem_object_set_page_gfp_mask (obj, + i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY); + } ret = i915_gem_object_get_pages(obj); + if (retry_alloc) { + i915_gem_object_set_page_gfp_mask (obj, + i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY); + } if (ret) { drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; + + if (ret == -ENOMEM) { + /* first try to clear up some space from the GTT */ + ret = i915_gem_evict_something(dev, obj->size); + if (ret) { + /* now try to shrink everyone else */ + if (! 
retry_alloc) { + retry_alloc = true; + goto search_free; + } + + return ret; + } + + goto search_free; + } + return ret; } - page_count = obj->size / PAGE_SIZE; /* Create an AGP memory structure pointing at our pages, and bind it * into the GTT. */ obj_priv->agp_mem = drm_agp_bind_pages(dev, obj_priv->pages, - page_count, + obj->size >> PAGE_SHIFT, obj_priv->gtt_offset, obj_priv->agp_type); if (obj_priv->agp_mem == NULL) { i915_gem_object_put_pages(obj); drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; - return -ENOMEM; + + ret = i915_gem_evict_something(dev, obj->size); + if (ret) + return ret; + + goto search_free; } atomic_inc(&dev->gtt_count); atomic_add(obj->size, &dev->gtt_memory); @@ -2496,6 +2668,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); + trace_i915_gem_object_bind(obj, obj_priv->gtt_offset); + return 0; } @@ -2511,15 +2685,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj) if (obj_priv->pages == NULL) return; - /* XXX: The 865 in particular appears to be weird in how it handles - * cache flushing. We haven't figured it out, but the - * clflush+agp_chipset_flush doesn't appear to successfully get the - * data visible to the PGU, while wbinvd + agp_chipset_flush does. - */ - if (IS_I865G(obj->dev)) { - wbinvd(); - return; - } + trace_i915_gem_object_clflush(obj); drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); } @@ -2530,21 +2696,29 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; uint32_t seqno; + uint32_t old_write_domain; if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) return; /* Queue the GPU write cache flushing we need. */ + old_write_domain = obj->write_domain; i915_gem_flush(dev, 0, obj->write_domain); seqno = i915_add_request(dev, NULL, obj->write_domain); obj->write_domain = 0; i915_gem_object_move_to_active(obj, seqno); + + trace_i915_gem_object_change_domain(obj, + obj->read_domains, + old_write_domain); } /** Flushes the GTT write domain for the object if it's dirty. */ static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) { + uint32_t old_write_domain; + if (obj->write_domain != I915_GEM_DOMAIN_GTT) return; @@ -2552,7 +2726,12 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) * to it immediately go to main memory as far as we know, so there's * no chipset flush. It also doesn't land in render cache. */ + old_write_domain = obj->write_domain; obj->write_domain = 0; + + trace_i915_gem_object_change_domain(obj, + obj->read_domains, + old_write_domain); } /** Flushes the CPU write domain for the object if it's dirty. */ @@ -2560,13 +2739,19 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; + uint32_t old_write_domain; if (obj->write_domain != I915_GEM_DOMAIN_CPU) return; i915_gem_clflush_object(obj); drm_agp_chipset_flush(dev); + old_write_domain = obj->write_domain; obj->write_domain = 0; + + trace_i915_gem_object_change_domain(obj, + obj->read_domains, + old_write_domain); } /** @@ -2579,6 +2764,7 @@ int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) { struct drm_i915_gem_object *obj_priv = obj->driver_private; + uint32_t old_write_domain, old_read_domains; int ret; /* Not valid to be called on unbound objects. 
*/ @@ -2591,6 +2777,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) if (ret != 0) return ret; + old_write_domain = obj->write_domain; + old_read_domains = obj->read_domains; + /* If we're writing through the GTT domain, then CPU and GPU caches * will need to be invalidated at next use. */ @@ -2609,6 +2798,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) obj_priv->dirty = 1; } + trace_i915_gem_object_change_domain(obj, + old_read_domains, + old_write_domain); + return 0; } @@ -2621,6 +2814,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) { + uint32_t old_write_domain, old_read_domains; int ret; i915_gem_object_flush_gpu_write_domain(obj); @@ -2636,6 +2830,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) */ i915_gem_object_set_to_full_cpu_read_domain(obj); + old_write_domain = obj->write_domain; + old_read_domains = obj->read_domains; + /* Flush the CPU cache if it's still invalid. */ if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { i915_gem_clflush_object(obj); @@ -2656,6 +2853,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) obj->write_domain = I915_GEM_DOMAIN_CPU; } + trace_i915_gem_object_change_domain(obj, + old_read_domains, + old_write_domain); + return 0; } @@ -2777,6 +2978,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) struct drm_i915_gem_object *obj_priv = obj->driver_private; uint32_t invalidate_domains = 0; uint32_t flush_domains = 0; + uint32_t old_read_domains; BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU); BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU); @@ -2823,6 +3025,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) i915_gem_clflush_object(obj); } + old_read_domains = obj->read_domains; + /* The actual obj->write_domain will be updated with * pending_write_domain after we emit the accumulated flush for all * of our domain changes in execbuffers (which clears objects' @@ -2841,6 +3045,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) obj->read_domains, obj->write_domain, dev->invalidate_domains, dev->flush_domains); #endif + + trace_i915_gem_object_change_domain(obj, + old_read_domains, + obj->write_domain); } /** @@ -2893,6 +3101,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, uint64_t offset, uint64_t size) { struct drm_i915_gem_object *obj_priv = obj->driver_private; + uint32_t old_read_domains; int i, ret; if (offset == 0 && size == obj->size) @@ -2939,8 +3148,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, */ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); + old_read_domains = obj->read_domains; obj->read_domains |= I915_GEM_DOMAIN_CPU; + trace_i915_gem_object_change_domain(obj, + old_read_domains, + obj->write_domain); + return 0; } @@ -2984,6 +3198,21 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, } target_obj_priv = target_obj->driver_private; +#if WATCH_RELOC + DRM_INFO("%s: obj %p offset %08x target %d " + "read %08x write %08x gtt %08x " + "presumed %08x delta %08x\n", + __func__, + obj, + (int) reloc->offset, + (int) reloc->target_handle, + (int) reloc->read_domains, + (int) reloc->write_domain, + (int) target_obj_priv->gtt_offset, + (int) reloc->presumed_offset, + reloc->delta); +#endif + /* The target buffer should have appeared before us in the * exec_object list, so it 
should have a GTT space bound by now. */ @@ -2995,6 +3224,46 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, return -EINVAL; } + /* Validate that the target is in a valid r/w GPU domain */ + if (reloc->write_domain & I915_GEM_DOMAIN_CPU || + reloc->read_domains & I915_GEM_DOMAIN_CPU) { + DRM_ERROR("reloc with read/write CPU domains: " + "obj %p target %d offset %d " + "read %08x write %08x", + obj, reloc->target_handle, + (int) reloc->offset, + reloc->read_domains, + reloc->write_domain); + drm_gem_object_unreference(target_obj); + i915_gem_object_unpin(obj); + return -EINVAL; + } + if (reloc->write_domain && target_obj->pending_write_domain && + reloc->write_domain != target_obj->pending_write_domain) { + DRM_ERROR("Write domain conflict: " + "obj %p target %d offset %d " + "new %08x old %08x\n", + obj, reloc->target_handle, + (int) reloc->offset, + reloc->write_domain, + target_obj->pending_write_domain); + drm_gem_object_unreference(target_obj); + i915_gem_object_unpin(obj); + return -EINVAL; + } + + target_obj->pending_read_domains |= reloc->read_domains; + target_obj->pending_write_domain |= reloc->write_domain; + + /* If the relocation already has the right value in it, no + * more work needs to be done. + */ + if (target_obj_priv->gtt_offset == reloc->presumed_offset) { + drm_gem_object_unreference(target_obj); + continue; + } + + /* Check that the relocation address is valid... */ if (reloc->offset > obj->size - 4) { DRM_ERROR("Relocation beyond object bounds: " "obj %p target %d offset %d size %d.\n", @@ -3014,60 +3283,17 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, return -EINVAL; } - if (reloc->write_domain & I915_GEM_DOMAIN_CPU || - reloc->read_domains & I915_GEM_DOMAIN_CPU) { - DRM_ERROR("reloc with read/write CPU domains: " - "obj %p target %d offset %d " - "read %08x write %08x", + /* and points to somewhere within the target object. */ + if (reloc->delta >= target_obj->size) { + DRM_ERROR("Relocation beyond target object bounds: " + "obj %p target %d delta %d size %d.\n", obj, reloc->target_handle, - (int) reloc->offset, - reloc->read_domains, - reloc->write_domain); + (int) reloc->delta, (int) target_obj->size); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; } - if (reloc->write_domain && target_obj->pending_write_domain && - reloc->write_domain != target_obj->pending_write_domain) { - DRM_ERROR("Write domain conflict: " - "obj %p target %d offset %d " - "new %08x old %08x\n", - obj, reloc->target_handle, - (int) reloc->offset, - reloc->write_domain, - target_obj->pending_write_domain); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; - } - -#if WATCH_RELOC - DRM_INFO("%s: obj %p offset %08x target %d " - "read %08x write %08x gtt %08x " - "presumed %08x delta %08x\n", - __func__, - obj, - (int) reloc->offset, - (int) reloc->target_handle, - (int) reloc->read_domains, - (int) reloc->write_domain, - (int) target_obj_priv->gtt_offset, - (int) reloc->presumed_offset, - reloc->delta); -#endif - - target_obj->pending_read_domains |= reloc->read_domains; - target_obj->pending_write_domain |= reloc->write_domain; - - /* If the relocation already has the right value in it, no - * more work needs to be done. 
- */ - if (target_obj_priv->gtt_offset == reloc->presumed_offset) { - drm_gem_object_unreference(target_obj); - continue; - } - ret = i915_gem_object_set_to_gtt_domain(obj, 1); if (ret != 0) { drm_gem_object_unreference(target_obj); @@ -3126,6 +3352,8 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev, exec_start = (uint32_t) exec_offset + exec->batch_start_offset; exec_len = (uint32_t) exec->batch_len; + trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno); + count = nbox ? nbox : 1; for (i = 0; i < count; i++) { @@ -3363,7 +3591,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, i915_verify_inactive(dev, __FILE__, __LINE__); - if (dev_priv->mm.wedged) { + if (atomic_read(&dev_priv->mm.wedged)) { DRM_ERROR("Execbuf while wedged\n"); mutex_unlock(&dev->struct_mutex); ret = -EIO; @@ -3421,8 +3649,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, /* error other than GTT full, or we've already tried again */ if (ret != -ENOSPC || pin_tries >= 1) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Failed to pin buffers %d\n", ret); + if (ret != -ERESTARTSYS) { + unsigned long long total_size = 0; + for (i = 0; i < args->buffer_count; i++) + total_size += object_list[i]->size; + DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n", + pinned+1, args->buffer_count, + total_size, ret); + DRM_ERROR("%d objects [%d pinned], " + "%d object bytes [%d pinned], " + "%d/%d gtt bytes\n", + atomic_read(&dev->object_count), + atomic_read(&dev->pin_count), + atomic_read(&dev->object_memory), + atomic_read(&dev->pin_memory), + atomic_read(&dev->gtt_memory), + dev->gtt_total); + } goto err; } @@ -3433,7 +3676,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, /* evict everyone we can from the aperture */ ret = i915_gem_evict_everything(dev); - if (ret) + if (ret && ret != -ENOSPC) goto err; } @@ -3489,8 +3732,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; + uint32_t old_write_domain = obj->write_domain; obj->write_domain = obj->pending_write_domain; + trace_i915_gem_object_change_domain(obj, + obj->read_domains, + old_write_domain); } i915_verify_inactive(dev, __FILE__, __LINE__); @@ -3607,11 +3854,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) i915_verify_inactive(dev, __FILE__, __LINE__); if (obj_priv->gtt_space == NULL) { ret = i915_gem_object_bind_to_gtt(obj, alignment); - if (ret != 0) { - if (ret != -EBUSY && ret != -ERESTARTSYS) - DRM_ERROR("Failure to bind: %d\n", ret); + if (ret) return ret; - } } /* * Pre-965 chips need a fence register set up in order to @@ -3691,6 +3935,13 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, } obj_priv = obj->driver_private; + if (obj_priv->madv != I915_MADV_WILLNEED) { + DRM_ERROR("Attempting to pin a purgeable buffer\n"); + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", args->handle); @@ -3803,6 +4054,56 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data, return i915_gem_ring_throttle(dev, file_priv); } +int +i915_gem_madvise_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_madvise *args = data; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + + switch (args->madv) { + case I915_MADV_DONTNEED: + case I915_MADV_WILLNEED: + break; + 
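+	/*
+	 * A sketch of the intended userspace pattern, for illustration
+	 * only (it assumes the DRM_IOCTL_I915_GEM_MADVISE wrapper from
+	 * the matching i915_drm.h update, which is not part of this hunk,
+	 * and regenerate_buffer_contents() stands in for the caller's own
+	 * refill path):
+	 *
+	 *	struct drm_i915_gem_madvise madv = {
+	 *		.handle = bo_handle,
+	 *		.madv = I915_MADV_DONTNEED,
+	 *	};
+	 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
+	 *	...
+	 *	madv.madv = I915_MADV_WILLNEED;
+	 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
+	 *	if (!madv.retained)
+	 *		regenerate_buffer_contents(bo_handle);
+	 */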
default: + return -EINVAL; + } + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) { + DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n", + args->handle); + return -EBADF; + } + + mutex_lock(&dev->struct_mutex); + obj_priv = obj->driver_private; + + if (obj_priv->pin_count) { + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n"); + return -EINVAL; + } + + if (obj_priv->madv != __I915_MADV_PURGED) + obj_priv->madv = args->madv; + + /* if the object is no longer bound, discard its backing storage */ + if (i915_gem_object_is_purgeable(obj_priv) && + obj_priv->gtt_space == NULL) + i915_gem_object_truncate(obj); + + args->retained = obj_priv->madv != __I915_MADV_PURGED; + + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + return 0; +} + int i915_gem_init_object(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv; @@ -3827,6 +4128,9 @@ int i915_gem_init_object(struct drm_gem_object *obj) obj_priv->fence_reg = I915_FENCE_REG_NONE; INIT_LIST_HEAD(&obj_priv->list); INIT_LIST_HEAD(&obj_priv->fence_list); + obj_priv->madv = I915_MADV_WILLNEED; + + trace_i915_gem_object_create(obj); return 0; } @@ -3836,6 +4140,8 @@ void i915_gem_free_object(struct drm_gem_object *obj) struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = obj->driver_private; + trace_i915_gem_object_destroy(obj); + while (obj_priv->pin_count > 0) i915_gem_object_unpin(obj); @@ -3844,43 +4150,35 @@ void i915_gem_free_object(struct drm_gem_object *obj) i915_gem_object_unbind(obj); - i915_gem_free_mmap_offset(obj); + if (obj_priv->mmap_offset) + i915_gem_free_mmap_offset(obj); kfree(obj_priv->page_cpu_valid); kfree(obj_priv->bit_17); kfree(obj->driver_private); } -/** Unbinds all objects that are on the given buffer list. */ +/** Unbinds all inactive objects. */ static int -i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) +i915_gem_evict_from_inactive_list(struct drm_device *dev) { - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; - int ret; + drm_i915_private_t *dev_priv = dev->dev_private; - while (!list_empty(head)) { - obj_priv = list_first_entry(head, - struct drm_i915_gem_object, - list); - obj = obj_priv->obj; + while (!list_empty(&dev_priv->mm.inactive_list)) { + struct drm_gem_object *obj; + int ret; - if (obj_priv->pin_count != 0) { - DRM_ERROR("Pinned object in unbind list\n"); - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } + obj = list_first_entry(&dev_priv->mm.inactive_list, + struct drm_i915_gem_object, + list)->obj; ret = i915_gem_object_unbind(obj); if (ret != 0) { - DRM_ERROR("Error unbinding object in LeaveVT: %d\n", - ret); - mutex_unlock(&dev->struct_mutex); + DRM_ERROR("Error unbinding object: %d\n", ret); return ret; } } - return 0; } @@ -3902,6 +4200,7 @@ i915_gem_idle(struct drm_device *dev) * We need to replace this with a semaphore, or something. 
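+	 * The hangcheck timer is deleted just below for a related reason:
+	 * once suspended is set, no new requests will complete, and a
+	 * stale hangcheck firing against an already-idled GPU could
+	 * wrongly mark it wedged.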
*/ dev_priv->mm.suspended = 1; + del_timer(&dev_priv->hangcheck_timer); /* Cancel the retire work handler, wait for it to finish if running */ @@ -3931,7 +4230,7 @@ i915_gem_idle(struct drm_device *dev) if (last_seqno == cur_seqno) { if (stuck++ > 100) { DRM_ERROR("hardware wedged\n"); - dev_priv->mm.wedged = 1; + atomic_set(&dev_priv->mm.wedged, 1); DRM_WAKEUP(&dev_priv->irq_queue); break; } @@ -3944,7 +4243,7 @@ i915_gem_idle(struct drm_device *dev) i915_gem_retire_requests(dev); spin_lock(&dev_priv->mm.active_list_lock); - if (!dev_priv->mm.wedged) { + if (!atomic_read(&dev_priv->mm.wedged)) { /* Active and flushing should now be empty as we've * waited for a sequence higher than any pending execbuffer */ @@ -3962,29 +4261,41 @@ i915_gem_idle(struct drm_device *dev) * the GPU domains and just stuff them onto inactive. */ while (!list_empty(&dev_priv->mm.active_list)) { - struct drm_i915_gem_object *obj_priv; + struct drm_gem_object *obj; + uint32_t old_write_domain; - obj_priv = list_first_entry(&dev_priv->mm.active_list, - struct drm_i915_gem_object, - list); - obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS; - i915_gem_object_move_to_inactive(obj_priv->obj); + obj = list_first_entry(&dev_priv->mm.active_list, + struct drm_i915_gem_object, + list)->obj; + old_write_domain = obj->write_domain; + obj->write_domain &= ~I915_GEM_GPU_DOMAINS; + i915_gem_object_move_to_inactive(obj); + + trace_i915_gem_object_change_domain(obj, + obj->read_domains, + old_write_domain); } spin_unlock(&dev_priv->mm.active_list_lock); while (!list_empty(&dev_priv->mm.flushing_list)) { - struct drm_i915_gem_object *obj_priv; + struct drm_gem_object *obj; + uint32_t old_write_domain; - obj_priv = list_first_entry(&dev_priv->mm.flushing_list, - struct drm_i915_gem_object, - list); - obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS; - i915_gem_object_move_to_inactive(obj_priv->obj); + obj = list_first_entry(&dev_priv->mm.flushing_list, + struct drm_i915_gem_object, + list)->obj; + old_write_domain = obj->write_domain; + obj->write_domain &= ~I915_GEM_GPU_DOMAINS; + i915_gem_object_move_to_inactive(obj); + + trace_i915_gem_object_change_domain(obj, + obj->read_domains, + old_write_domain); } /* Move all inactive buffers out of the GTT. 
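+	 * The active and flushing lists were drained above, so everything
+	 * still on the inactive list is idle and can be unbound without
+	 * waiting on the GPU again.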
*/ - ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list); + ret = i915_gem_evict_from_inactive_list(dev); WARN_ON(!list_empty(&dev_priv->mm.inactive_list)); if (ret) { mutex_unlock(&dev->struct_mutex); @@ -4206,9 +4517,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, if (drm_core_check_feature(dev, DRIVER_MODESET)) return 0; - if (dev_priv->mm.wedged) { + if (atomic_read(&dev_priv->mm.wedged)) { DRM_ERROR("Reenabling wedged hardware, good luck\n"); - dev_priv->mm.wedged = 0; + atomic_set(&dev_priv->mm.wedged, 0); } mutex_lock(&dev->struct_mutex); @@ -4274,6 +4585,10 @@ i915_gem_load(struct drm_device *dev) i915_gem_retire_work_handler); dev_priv->mm.next_gem_seqno = 1; + spin_lock(&shrink_list_lock); + list_add(&dev_priv->mm.shrink_list, &shrink_list); + spin_unlock(&shrink_list_lock); + /* Old X drivers will take 0-2 for front, back, depth buffers */ dev_priv->fence_reg_start = 3; @@ -4491,3 +4806,116 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv) list_del_init(i915_file_priv->mm.request_list.next); mutex_unlock(&dev->struct_mutex); } + +static int +i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) +{ + drm_i915_private_t *dev_priv, *next_dev; + struct drm_i915_gem_object *obj_priv, *next_obj; + int cnt = 0; + int would_deadlock = 1; + + /* "fast-path" to count number of available objects */ + if (nr_to_scan == 0) { + spin_lock(&shrink_list_lock); + list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { + struct drm_device *dev = dev_priv->dev; + + if (mutex_trylock(&dev->struct_mutex)) { + list_for_each_entry(obj_priv, + &dev_priv->mm.inactive_list, + list) + cnt++; + mutex_unlock(&dev->struct_mutex); + } + } + spin_unlock(&shrink_list_lock); + + return (cnt / 100) * sysctl_vfs_cache_pressure; + } + + spin_lock(&shrink_list_lock); + + /* first scan for clean buffers */ + list_for_each_entry_safe(dev_priv, next_dev, + &shrink_list, mm.shrink_list) { + struct drm_device *dev = dev_priv->dev; + + if (! mutex_trylock(&dev->struct_mutex)) + continue; + + spin_unlock(&shrink_list_lock); + + i915_gem_retire_requests(dev); + + list_for_each_entry_safe(obj_priv, next_obj, + &dev_priv->mm.inactive_list, + list) { + if (i915_gem_object_is_purgeable(obj_priv)) { + i915_gem_object_unbind(obj_priv->obj); + if (--nr_to_scan <= 0) + break; + } + } + + spin_lock(&shrink_list_lock); + mutex_unlock(&dev->struct_mutex); + + would_deadlock = 0; + + if (nr_to_scan <= 0) + break; + } + + /* second pass, evict/count anything still on the inactive list */ + list_for_each_entry_safe(dev_priv, next_dev, + &shrink_list, mm.shrink_list) { + struct drm_device *dev = dev_priv->dev; + + if (! 
mutex_trylock(&dev->struct_mutex)) + continue; + + spin_unlock(&shrink_list_lock); + + list_for_each_entry_safe(obj_priv, next_obj, + &dev_priv->mm.inactive_list, + list) { + if (nr_to_scan > 0) { + i915_gem_object_unbind(obj_priv->obj); + nr_to_scan--; + } else + cnt++; + } + + spin_lock(&shrink_list_lock); + mutex_unlock(&dev->struct_mutex); + + would_deadlock = 0; + } + + spin_unlock(&shrink_list_lock); + + if (would_deadlock) + return -1; + else if (cnt > 0) + return (cnt / 100) * sysctl_vfs_cache_pressure; + else + return 0; +} + +static struct shrinker shrinker = { + .shrink = i915_gem_shrink, + .seeks = DEFAULT_SEEKS, +}; + +__init void +i915_gem_shrinker_init(void) +{ + register_shrinker(&shrinker); +} + +__exit void +i915_gem_shrinker_exit(void) +{ + unregister_shrinker(&shrinker); +} diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 6c89f2ff2495..4dfeec7cdd42 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -31,6 +31,7 @@ #include "drm.h" #include "i915_drm.h" #include "i915_drv.h" +#include "i915_trace.h" #include "intel_drv.h" #define MAX_NOPID ((u32)~0) @@ -279,7 +280,9 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev) } if (gt_iir & GT_USER_INTERRUPT) { - dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); + u32 seqno = i915_get_gem_seqno(dev); + dev_priv->mm.irq_gem_seqno = seqno; + trace_i915_gem_request_complete(dev, seqno); DRM_WAKEUP(&dev_priv->irq_queue); } @@ -302,12 +305,25 @@ static void i915_error_work_func(struct work_struct *work) drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, error_work); struct drm_device *dev = dev_priv->dev; - char *event_string = "ERROR=1"; - char *envp[] = { event_string, NULL }; + char *error_event[] = { "ERROR=1", NULL }; + char *reset_event[] = { "RESET=1", NULL }; + char *reset_done_event[] = { "ERROR=0", NULL }; DRM_DEBUG("generating error event\n"); + kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); - kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp); + if (atomic_read(&dev_priv->mm.wedged)) { + if (IS_I965G(dev)) { + DRM_DEBUG("resetting chip\n"); + kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); + if (!i965_reset(dev, GDRST_RENDER)) { + atomic_set(&dev_priv->mm.wedged, 0); + kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); + } + } else { + printk("reboot required\n"); + } + } } /** @@ -372,7 +388,7 @@ out: * so userspace knows something bad happened (should trigger collection * of a ring dump etc.). 
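+ * Userspace can watch for these events rather than polling; as a
+ * rough sketch (udevadm is merely one convenient observer, nothing
+ * here depends on it):
+ *
+ *	udevadm monitor --property --subsystem-match=drm
+ *
+ * reports ERROR=1 when the error is raised, RESET=1 while a reset is
+ * attempted, and ERROR=0 once the reset succeeds.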
*/ -static void i915_handle_error(struct drm_device *dev) +static void i915_handle_error(struct drm_device *dev, bool wedged) { struct drm_i915_private *dev_priv = dev->dev_private; u32 eir = I915_READ(EIR); @@ -482,6 +498,16 @@ static void i915_handle_error(struct drm_device *dev) I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); } + if (wedged) { + atomic_set(&dev_priv->mm.wedged, 1); + + /* + * Wakeup waiting processes so they don't hang + */ + printk("i915: Waking up sleeping processes\n"); + DRM_WAKEUP(&dev_priv->irq_queue); + } + queue_work(dev_priv->wq, &dev_priv->error_work); } @@ -527,7 +553,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) pipeb_stats = I915_READ(PIPEBSTAT); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - i915_handle_error(dev); + i915_handle_error(dev, false); /* * Clear the PIPE(A|B)STAT regs before the IIR @@ -599,8 +625,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) } if (iir & I915_USER_INTERRUPT) { - dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); + u32 seqno = i915_get_gem_seqno(dev); + dev_priv->mm.irq_gem_seqno = seqno; + trace_i915_gem_request_complete(dev, seqno); DRM_WAKEUP(&dev_priv->irq_queue); + dev_priv->hangcheck_count = 0; + mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); } if (pipea_stats & vblank_status) { @@ -880,6 +910,52 @@ int i915_vblank_swap(struct drm_device *dev, void *data, return -EINVAL; } +struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) { + drm_i915_private_t *dev_priv = dev->dev_private; + return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list); +} + +/** + * This is called when the chip hasn't reported back with completed + * batchbuffers in a long time. The first time this is called we simply record + * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses + * again, we assume the chip is wedged and try to fix it. + */ +void i915_hangcheck_elapsed(unsigned long data) +{ + struct drm_device *dev = (struct drm_device *)data; + drm_i915_private_t *dev_priv = dev->dev_private; + uint32_t acthd; + + if (!IS_I965G(dev)) + acthd = I915_READ(ACTHD); + else + acthd = I915_READ(ACTHD_I965); + + /* If all work is done then ACTHD clearly hasn't advanced. */ + if (list_empty(&dev_priv->mm.request_list) || + i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) { + dev_priv->hangcheck_count = 0; + return; + } + + if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) { + DRM_ERROR("Hangcheck timer elapsed... 
GPU hung\n"); + i915_handle_error(dev, true); + return; + } + + /* Reset timer in case the chip hangs without another request being added */ + mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); + + if (acthd != dev_priv->last_acthd) + dev_priv->hangcheck_count = 0; + else + dev_priv->hangcheck_count++; + + dev_priv->last_acthd = acthd; +} + /* drm_dma.h hooks */ static void igdng_irq_preinstall(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c index e4b4e8898e39..2d5193556d3f 100644 --- a/drivers/gpu/drm/i915/i915_opregion.c +++ b/drivers/gpu/drm/i915/i915_opregion.c @@ -148,6 +148,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; u32 blc_pwm_ctl, blc_pwm_ctl2; + u32 max_backlight, level, shift; if (!(bclp & ASLE_BCLP_VALID)) return ASLE_BACKLIGHT_FAIL; @@ -157,14 +158,25 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) return ASLE_BACKLIGHT_FAIL; blc_pwm_ctl = I915_READ(BLC_PWM_CTL); - blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK; blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); - if (blc_pwm_ctl2 & BLM_COMBINATION_MODE) + if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE)) pci_write_config_dword(dev->pdev, PCI_LBPC, bclp); - else - I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1)); - + else { + if (IS_IGD(dev)) { + blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); + max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> + BACKLIGHT_MODULATION_FREQ_SHIFT; + shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1; + } else { + blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK; + max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> + BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; + shift = BACKLIGHT_DUTY_CYCLE_SHIFT; + } + level = (bclp * max_backlight) / 255; + I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift)); + } asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; return 0; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3f7963553464..0466ddbeba32 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -86,6 +86,10 @@ #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) #define LBB 0xf4 +#define GDRST 0xc0 +#define GDRST_FULL (0<<2) +#define GDRST_RENDER (1<<2) +#define GDRST_MEDIA (3<<2) /* VGA stuff */ @@ -344,9 +348,37 @@ #define FBC_CTL_PLANEA (0<<0) #define FBC_CTL_PLANEB (1<<0) #define FBC_FENCE_OFF 0x0321b +#define FBC_TAG 0x03300 #define FBC_LL_SIZE (1536) +/* Framebuffer compression for GM45+ */ +#define DPFC_CB_BASE 0x3200 +#define DPFC_CONTROL 0x3208 +#define DPFC_CTL_EN (1<<31) +#define DPFC_CTL_PLANEA (0<<30) +#define DPFC_CTL_PLANEB (1<<30) +#define DPFC_CTL_FENCE_EN (1<<29) +#define DPFC_SR_EN (1<<10) +#define DPFC_CTL_LIMIT_1X (0<<6) +#define DPFC_CTL_LIMIT_2X (1<<6) +#define DPFC_CTL_LIMIT_4X (2<<6) +#define DPFC_RECOMP_CTL 0x320c +#define DPFC_RECOMP_STALL_EN (1<<27) +#define DPFC_RECOMP_STALL_WM_SHIFT (16) +#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000) +#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0) +#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f) +#define DPFC_STATUS 0x3210 +#define DPFC_INVAL_SEG_SHIFT (16) +#define DPFC_INVAL_SEG_MASK (0x07ff0000) +#define DPFC_COMP_SEG_SHIFT (0) +#define DPFC_COMP_SEG_MASK (0x000003ff) +#define DPFC_STATUS2 0x3214 +#define DPFC_FENCE_YOFF 0x3218 +#define DPFC_CHICKEN 0x3224 +#define DPFC_HT_MODIFY (1<<31) + /* 
* GPIO regs */ @@ -2000,6 +2032,8 @@ #define PF_ENABLE (1<<31) #define PFA_WIN_SZ 0x68074 #define PFB_WIN_SZ 0x68874 +#define PFA_WIN_POS 0x68070 +#define PFB_WIN_POS 0x68870 /* legacy palette */ #define LGC_PALETTE_A 0x4a000 diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 20d4d19f5568..bd6d8d91ca9f 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -228,6 +228,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) if (drm_core_check_feature(dev, DRIVER_MODESET)) return; + /* Pipe & plane A info */ dev_priv->savePIPEACONF = I915_READ(PIPEACONF); dev_priv->savePIPEASRC = I915_READ(PIPEASRC); @@ -285,6 +286,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); return; } + static void i915_restore_modeset_reg(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -379,19 +381,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev) return; } -int i915_save_state(struct drm_device *dev) + +void i915_save_display(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int i; - - pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); - - /* Render Standby */ - if (IS_I965G(dev) && IS_MOBILE(dev)) - dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); - - /* Hardware status page */ - dev_priv->saveHWS = I915_READ(HWS_PGA); /* Display arbitration control */ dev_priv->saveDSPARB = I915_READ(DSPARB); @@ -399,6 +392,7 @@ int i915_save_state(struct drm_device *dev) /* This is only meaningful in non-KMS mode */ /* Don't save them in KMS mode */ i915_save_modeset_reg(dev); + /* Cursor state */ dev_priv->saveCURACNTR = I915_READ(CURACNTR); dev_priv->saveCURAPOS = I915_READ(CURAPOS); @@ -448,81 +442,22 @@ int i915_save_state(struct drm_device *dev) dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); - /* Interrupt state */ - dev_priv->saveIIR = I915_READ(IIR); - dev_priv->saveIER = I915_READ(IER); - dev_priv->saveIMR = I915_READ(IMR); - /* VGA state */ dev_priv->saveVGA0 = I915_READ(VGA0); dev_priv->saveVGA1 = I915_READ(VGA1); dev_priv->saveVGA_PD = I915_READ(VGA_PD); dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); - /* Clock gating state */ - dev_priv->saveD_STATE = I915_READ(D_STATE); - dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); - - /* Cache mode state */ - dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); - - /* Memory Arbitration state */ - dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); - - /* Scratch space */ - for (i = 0; i < 16; i++) { - dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); - dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); - } - for (i = 0; i < 3; i++) - dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); - - /* Fences */ - if (IS_I965G(dev)) { - for (i = 0; i < 16; i++) - dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); - } else { - for (i = 0; i < 8; i++) - dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); - - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) - for (i = 0; i < 8; i++) - dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); - } i915_save_vga(dev); - - return 0; } -int i915_restore_state(struct drm_device *dev) +void i915_restore_display(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int i; - - pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); - - /* Render Standby */ - if 
(IS_I965G(dev) && IS_MOBILE(dev)) - I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); - - /* Hardware status page */ - I915_WRITE(HWS_PGA, dev_priv->saveHWS); /* Display arbitration */ I915_WRITE(DSPARB, dev_priv->saveDSPARB); - /* Fences */ - if (IS_I965G(dev)) { - for (i = 0; i < 16; i++) - I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); - } else { - for (i = 0; i < 8; i++) - I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) - for (i = 0; i < 8; i++) - I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); - } - /* Display port ratios (must be done before clock is set) */ if (SUPPORTS_INTEGRATED_DP(dev)) { I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); @@ -534,9 +469,11 @@ int i915_restore_state(struct drm_device *dev) I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); } + /* This is only meaningful in non-KMS mode */ /* Don't restore them in KMS mode */ i915_restore_modeset_reg(dev); + /* Cursor state */ I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); @@ -586,6 +523,95 @@ int i915_restore_state(struct drm_device *dev) I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); DRM_UDELAY(150); + i915_restore_vga(dev); +} + +int i915_save_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); + + /* Render Standby */ + if (IS_I965G(dev) && IS_MOBILE(dev)) + dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); + + /* Hardware status page */ + dev_priv->saveHWS = I915_READ(HWS_PGA); + + i915_save_display(dev); + + /* Interrupt state */ + dev_priv->saveIER = I915_READ(IER); + dev_priv->saveIMR = I915_READ(IMR); + + /* Clock gating state */ + dev_priv->saveD_STATE = I915_READ(D_STATE); + dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); /* Not sure about this */ + + /* Cache mode state */ + dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); + + /* Memory Arbitration state */ + dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); + + /* Scratch space */ + for (i = 0; i < 16; i++) { + dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); + dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); + } + for (i = 0; i < 3; i++) + dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); + + /* Fences */ + if (IS_I965G(dev)) { + for (i = 0; i < 16; i++) + dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); + } else { + for (i = 0; i < 8; i++) + dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); + + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); + } + + return 0; +} + +int i915_restore_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); + + /* Render Standby */ + if (IS_I965G(dev) && IS_MOBILE(dev)) + I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); + + /* Hardware status page */ + I915_WRITE(HWS_PGA, dev_priv->saveHWS); + + /* Fences */ + if (IS_I965G(dev)) { + for (i = 0; i < 16; i++) + I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); + } else { + for (i = 0; i < 8; i++) + I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; 
i++) + I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); + } + + i915_restore_display(dev); + + /* Interrupt state */ + I915_WRITE (IER, dev_priv->saveIER); + I915_WRITE (IMR, dev_priv->saveIMR); + /* Clock gating state */ I915_WRITE (D_STATE, dev_priv->saveD_STATE); I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D); @@ -603,8 +629,6 @@ int i915_restore_state(struct drm_device *dev) for (i = 0; i < 3; i++) I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); - i915_restore_vga(dev); - return 0; } diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h new file mode 100644 index 000000000000..5567a40816f3 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -0,0 +1,315 @@ +#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _I915_TRACE_H_ + +#include <linux/stringify.h> +#include <linux/types.h> +#include <linux/tracepoint.h> + +#include <drm/drmP.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM i915 +#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM) +#define TRACE_INCLUDE_FILE i915_trace + +/* object tracking */ + +TRACE_EVENT(i915_gem_object_create, + + TP_PROTO(struct drm_gem_object *obj), + + TP_ARGS(obj), + + TP_STRUCT__entry( + __field(struct drm_gem_object *, obj) + __field(u32, size) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->size = obj->size; + ), + + TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) +); + +TRACE_EVENT(i915_gem_object_bind, + + TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset), + + TP_ARGS(obj, gtt_offset), + + TP_STRUCT__entry( + __field(struct drm_gem_object *, obj) + __field(u32, gtt_offset) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->gtt_offset = gtt_offset; + ), + + TP_printk("obj=%p, gtt_offset=%08x", + __entry->obj, __entry->gtt_offset) +); + +TRACE_EVENT(i915_gem_object_clflush, + + TP_PROTO(struct drm_gem_object *obj), + + TP_ARGS(obj), + + TP_STRUCT__entry( + __field(struct drm_gem_object *, obj) + ), + + TP_fast_assign( + __entry->obj = obj; + ), + + TP_printk("obj=%p", __entry->obj) +); + +TRACE_EVENT(i915_gem_object_change_domain, + + TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), + + TP_ARGS(obj, old_read_domains, old_write_domain), + + TP_STRUCT__entry( + __field(struct drm_gem_object *, obj) + __field(u32, read_domains) + __field(u32, write_domain) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->read_domains = obj->read_domains | (old_read_domains << 16); + __entry->write_domain = obj->write_domain | (old_write_domain << 16); + ), + + TP_printk("obj=%p, read=%04x, write=%04x", + __entry->obj, + __entry->read_domains, __entry->write_domain) +); + +TRACE_EVENT(i915_gem_object_get_fence, + + TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode), + + TP_ARGS(obj, fence, tiling_mode), + + TP_STRUCT__entry( + __field(struct drm_gem_object *, obj) + __field(int, fence) + __field(int, tiling_mode) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->fence = fence; + __entry->tiling_mode = tiling_mode; + ), + + TP_printk("obj=%p, fence=%d, tiling=%d", + __entry->obj, __entry->fence, __entry->tiling_mode) +); + +TRACE_EVENT(i915_gem_object_unbind, + + TP_PROTO(struct drm_gem_object *obj), + + TP_ARGS(obj), + + TP_STRUCT__entry( + __field(struct drm_gem_object *, obj) + ), + + TP_fast_assign( + __entry->obj = obj; + ), + + TP_printk("obj=%p", __entry->obj) +); + +TRACE_EVENT(i915_gem_object_destroy, + + TP_PROTO(struct drm_gem_object *obj), + + TP_ARGS(obj), + + TP_STRUCT__entry( + __field(struct drm_gem_object *, obj) + ), + + TP_fast_assign( 
+ __entry->obj = obj; + ), + + TP_printk("obj=%p", __entry->obj) +); + +/* batch tracing */ + +TRACE_EVENT(i915_gem_request_submit, + + TP_PROTO(struct drm_device *dev, u32 seqno), + + TP_ARGS(dev, seqno), + + TP_STRUCT__entry( + __field(struct drm_device *, dev) + __field(u32, seqno) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->seqno = seqno; + ), + + TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) +); + +TRACE_EVENT(i915_gem_request_flush, + + TP_PROTO(struct drm_device *dev, u32 seqno, + u32 flush_domains, u32 invalidate_domains), + + TP_ARGS(dev, seqno, flush_domains, invalidate_domains), + + TP_STRUCT__entry( + __field(struct drm_device *, dev) + __field(u32, seqno) + __field(u32, flush_domains) + __field(u32, invalidate_domains) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->seqno = seqno; + __entry->flush_domains = flush_domains; + __entry->invalidate_domains = invalidate_domains; + ), + + TP_printk("dev=%p, seqno=%u, flush=%04x, invalidate=%04x", + __entry->dev, __entry->seqno, + __entry->flush_domains, __entry->invalidate_domains) +); + + +TRACE_EVENT(i915_gem_request_complete, + + TP_PROTO(struct drm_device *dev, u32 seqno), + + TP_ARGS(dev, seqno), + + TP_STRUCT__entry( + __field(struct drm_device *, dev) + __field(u32, seqno) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->seqno = seqno; + ), + + TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) +); + +TRACE_EVENT(i915_gem_request_retire, + + TP_PROTO(struct drm_device *dev, u32 seqno), + + TP_ARGS(dev, seqno), + + TP_STRUCT__entry( + __field(struct drm_device *, dev) + __field(u32, seqno) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->seqno = seqno; + ), + + TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) +); + +TRACE_EVENT(i915_gem_request_wait_begin, + + TP_PROTO(struct drm_device *dev, u32 seqno), + + TP_ARGS(dev, seqno), + + TP_STRUCT__entry( + __field(struct drm_device *, dev) + __field(u32, seqno) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->seqno = seqno; + ), + + TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) +); + +TRACE_EVENT(i915_gem_request_wait_end, + + TP_PROTO(struct drm_device *dev, u32 seqno), + + TP_ARGS(dev, seqno), + + TP_STRUCT__entry( + __field(struct drm_device *, dev) + __field(u32, seqno) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->seqno = seqno; + ), + + TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) +); + +TRACE_EVENT(i915_ring_wait_begin, + + TP_PROTO(struct drm_device *dev), + + TP_ARGS(dev), + + TP_STRUCT__entry( + __field(struct drm_device *, dev) + ), + + TP_fast_assign( + __entry->dev = dev; + ), + + TP_printk("dev=%p", __entry->dev) +); + +TRACE_EVENT(i915_ring_wait_end, + + TP_PROTO(struct drm_device *dev), + + TP_ARGS(dev), + + TP_STRUCT__entry( + __field(struct drm_device *, dev) + ), + + TP_fast_assign( + __entry->dev = dev; + ), + + TP_printk("dev=%p", __entry->dev) +); + +#endif /* _I915_TRACE_H_ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915 +#include <trace/define_trace.h> diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c new file mode 100644 index 000000000000..ead876eb6ea0 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_trace_points.c @@ -0,0 +1,11 @@ +/* + * Copyright © 2009 Intel Corporation + * + * Authors: + * Chris Wilson <chris@chris-wilson.co.uk> + */ + +#include "i915_drv.h" + +#define CREATE_TRACE_POINTS +#include "i915_trace.h" diff --git 
a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 1e28c1652fd0..4337414846b6 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -217,6 +217,9 @@ parse_general_features(struct drm_i915_private *dev_priv, if (IS_I85X(dev_priv->dev)) dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48; + else if (IS_IGDNG(dev_priv->dev)) + dev_priv->lvds_ssc_freq = + general->ssc_freq ? 100 : 120; else dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96; diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 88814fa2dfd2..212e22740fc1 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -179,13 +179,10 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 adpa, temp; + u32 adpa; bool ret; - temp = adpa = I915_READ(PCH_ADPA); - - adpa &= ~ADPA_DAC_ENABLE; - I915_WRITE(PCH_ADPA, adpa); + adpa = I915_READ(PCH_ADPA); adpa &= ~ADPA_CRT_HOTPLUG_MASK; @@ -212,8 +209,6 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) else ret = false; - /* restore origin register */ - I915_WRITE(PCH_ADPA, temp); return ret; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0227b1652906..93ff6c03733e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -24,6 +24,8 @@ * Eric Anholt <eric@anholt.net> */ +#include <linux/module.h> +#include <linux/input.h> #include <linux/i2c.h> #include <linux/kernel.h> #include "drmP.h" @@ -875,7 +877,7 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, refclk, best_clock); if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == + if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) clock.p2 = limit->p2.p2_fast; else @@ -952,6 +954,241 @@ intel_wait_for_vblank(struct drm_device *dev) mdelay(20); } +/* Parameters have changed, update FBC info */ +static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_framebuffer *fb = crtc->fb; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int plane, i; + u32 fbc_ctl, fbc_ctl2; + + dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; + + if (fb->pitch < dev_priv->cfb_pitch) + dev_priv->cfb_pitch = fb->pitch; + + /* FBC_CTL wants 64B units */ + dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; + dev_priv->cfb_fence = obj_priv->fence_reg; + dev_priv->cfb_plane = intel_crtc->plane; + plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; + + /* Clear old tags */ + for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) + I915_WRITE(FBC_TAG + (i * 4), 0); + + /* Set it up... */ + fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; + if (obj_priv->tiling_mode != I915_TILING_NONE) + fbc_ctl2 |= FBC_CTL_CPU_FENCE; + I915_WRITE(FBC_CONTROL2, fbc_ctl2); + I915_WRITE(FBC_FENCE_OFF, crtc->y); + + /* enable it... 
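+	 * On these parts FBC_CONTROL carries everything else the
+	 * compressor needs: the compressed-buffer stride in 64B
+	 * cachelines, the recompression interval, and (for tiled
+	 * scanout) the CPU fence number saved above.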
*/ + fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; + fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; + fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; + if (obj_priv->tiling_mode != I915_TILING_NONE) + fbc_ctl |= dev_priv->cfb_fence; + I915_WRITE(FBC_CONTROL, fbc_ctl); + + DRM_DEBUG("enabled FBC, pitch %ld, yoff %d, plane %d\n", + dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); +} + +void i8xx_disable_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 fbc_ctl; + + if (!I915_HAS_FBC(dev)) + return; + + /* Disable compression */ + fbc_ctl = I915_READ(FBC_CONTROL); + fbc_ctl &= ~FBC_CTL_EN; + I915_WRITE(FBC_CONTROL, fbc_ctl); + + /* Wait for compressing bit to clear */ + while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) + ; /* nothing */ + + intel_wait_for_vblank(dev); + + DRM_DEBUG("disabled FBC\n"); +} + +static bool i8xx_fbc_enabled(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + return I915_READ(FBC_CONTROL) & FBC_CTL_EN; +} + +static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_framebuffer *fb = crtc->fb; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : + DPFC_CTL_PLANEB); + unsigned long stall_watermark = 200; + u32 dpfc_ctl; + + dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; + dev_priv->cfb_fence = obj_priv->fence_reg; + dev_priv->cfb_plane = intel_crtc->plane; + + dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; + if (obj_priv->tiling_mode != I915_TILING_NONE) { + dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; + I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); + } else { + I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY); + } + + I915_WRITE(DPFC_CONTROL, dpfc_ctl); + I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | + (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | + (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); + I915_WRITE(DPFC_FENCE_YOFF, crtc->y); + + /* enable it... */ + I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); + + DRM_DEBUG("enabled fbc on plane %d\n", intel_crtc->plane); +} + +void g4x_disable_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = I915_READ(DPFC_CONTROL); + dpfc_ctl &= ~DPFC_CTL_EN; + I915_WRITE(DPFC_CONTROL, dpfc_ctl); + intel_wait_for_vblank(dev); + + DRM_DEBUG("disabled FBC\n"); +} + +static bool g4x_fbc_enabled(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; +} + +/** + * intel_update_fbc - enable/disable FBC as needed + * @crtc: CRTC to point the compressor at + * @mode: mode in use + * + * Set up the framebuffer compression hardware at mode set time. We + * enable it if possible: + * - plane A only (on pre-965) + * - no pixel multiply/line duplication + * - no alpha buffer discard + * - no dual wide + * - framebuffer <= 2048 in width, 1536 in height + * + * We can't assume that any compression will take place (worst case), + * so the compressed buffer has to be the same size as the uncompressed + * one. 
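+ * (For scale: a 1920x1080 XRGB8888 scanout needs 1920*1080*4 bytes,
+ * roughly 7.9 MiB, reserved for the compressed copy even when it
+ * usually compresses far better.)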
It also must reside (along with the line length buffer) in + * stolen memory. + * + * We need to enable/disable FBC on a global basis. + */ +static void intel_update_fbc(struct drm_crtc *crtc, + struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_framebuffer *fb = crtc->fb; + struct intel_framebuffer *intel_fb; + struct drm_i915_gem_object *obj_priv; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int plane = intel_crtc->plane; + + if (!i915_powersave) + return; + + if (!dev_priv->display.fbc_enabled || + !dev_priv->display.enable_fbc || + !dev_priv->display.disable_fbc) + return; + + if (!crtc->fb) + return; + + intel_fb = to_intel_framebuffer(fb); + obj_priv = intel_fb->obj->driver_private; + + /* + * If FBC is already on, we just have to verify that we can + * keep it that way... + * Need to disable if: + * - changing FBC params (stride, fence, mode) + * - new fb is too large to fit in compressed buffer + * - going to an unsupported config (interlace, pixel multiply, etc.) + */ + if (intel_fb->obj->size > dev_priv->cfb_size) { + DRM_DEBUG("framebuffer too large, disabling compression\n"); + goto out_disable; + } + if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || + (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { + DRM_DEBUG("mode incompatible with compression, disabling\n"); + goto out_disable; + } + if ((mode->hdisplay > 2048) || + (mode->vdisplay > 1536)) { + DRM_DEBUG("mode too large for compression, disabling\n"); + goto out_disable; + } + if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { + DRM_DEBUG("plane not 0, disabling compression\n"); + goto out_disable; + } + if (obj_priv->tiling_mode != I915_TILING_X) { + DRM_DEBUG("framebuffer not tiled, disabling compression\n"); + goto out_disable; + } + + if (dev_priv->display.fbc_enabled(crtc)) { + /* We can re-enable it in this case, but need to update pitch */ + if (fb->pitch > dev_priv->cfb_pitch) + dev_priv->display.disable_fbc(dev); + if (obj_priv->fence_reg != dev_priv->cfb_fence) + dev_priv->display.disable_fbc(dev); + if (plane != dev_priv->cfb_plane) + dev_priv->display.disable_fbc(dev); + } + + if (!dev_priv->display.fbc_enabled(crtc)) { + /* Now try to turn it back on if possible */ + dev_priv->display.enable_fbc(crtc, 500); + } + + return; + +out_disable: + DRM_DEBUG("unsupported config, disabling FBC\n"); + /* Multiple disables should be harmless */ + if (dev_priv->display.fbc_enabled(crtc)) + dev_priv->display.disable_fbc(dev); +} + static int intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) @@ -964,12 +1201,13 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_i915_gem_object *obj_priv; struct drm_gem_object *obj; int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; unsigned long Start, Offset; - int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR); - int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF); - int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; - int dsptileoff = (pipe == 0 ? DSPATILEOFF : DSPBTILEOFF); - int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; + int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR); + int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF); + int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE; + int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF); + int dspcntr_reg = (plane == 0) ? 
DSPACNTR : DSPBCNTR; u32 dspcntr, alignment; int ret; @@ -979,12 +1217,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return 0; } - switch (pipe) { + switch (plane) { case 0: case 1: break; default: - DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); + DRM_ERROR("Can't update plane %d in SAREA\n", plane); return -EINVAL; } @@ -1086,6 +1324,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, I915_READ(dspbase); } + if ((IS_I965G(dev) || plane == 0)) + intel_update_fbc(crtc, &crtc->mode); + intel_wait_for_vblank(dev); if (old_fb) { @@ -1217,6 +1458,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; + int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS; int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; @@ -1268,6 +1510,19 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) } } + /* Enable panel fitting for LVDS */ + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + temp = I915_READ(pf_ctl_reg); + I915_WRITE(pf_ctl_reg, temp | PF_ENABLE); + + /* currently full aspect */ + I915_WRITE(pf_win_pos, 0); + + I915_WRITE(pf_win_size, + (dev_priv->panel_fixed_mode->hdisplay << 16) | + (dev_priv->panel_fixed_mode->vdisplay)); + } + /* Enable CPU pipe */ temp = I915_READ(pipeconf_reg); if ((temp & PIPEACONF_ENABLE) == 0) { @@ -1532,9 +1787,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; - int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; - int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR; + int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; + int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; int pipeconf_reg = (pipe == 0) ? 
PIPEACONF : PIPEBCONF; u32 temp; @@ -1577,6 +1833,9 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) intel_crtc_load_lut(crtc); + if ((IS_I965G(dev) || plane == 0)) + intel_update_fbc(crtc, &crtc->mode); + /* Give the overlay scaler a chance to enable if it's on this pipe */ //intel_crtc_dpms_video(crtc, true); TODO intel_update_watermarks(dev); @@ -1586,6 +1845,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) /* Give the overlay scaler a chance to disable if it's on this pipe */ //intel_crtc_dpms_video(crtc, FALSE); TODO + if (dev_priv->cfb_plane == plane && + dev_priv->display.disable_fbc) + dev_priv->display.disable_fbc(dev); + /* Disable the VGA plane that we never use */ i915_disable_vga(dev); @@ -1634,15 +1897,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; bool enabled; - if (IS_IGDNG(dev)) - igdng_crtc_dpms(crtc, mode); - else - i9xx_crtc_dpms(crtc, mode); + dev_priv->display.dpms(crtc, mode); intel_crtc->dpms_mode = mode; @@ -1709,56 +1970,68 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, return true; } - -/** Returns the core display clock speed for i830 - i945 */ -static int intel_get_core_clock_speed(struct drm_device *dev) +static int i945_get_display_clock_speed(struct drm_device *dev) { + return 400000; +} - /* Core clock values taken from the published datasheets. - * The 830 may go up to 166 Mhz, which we should check. - */ - if (IS_I945G(dev)) - return 400000; - else if (IS_I915G(dev)) - return 333000; - else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev)) - return 200000; - else if (IS_I915GM(dev)) { - u16 gcfgc = 0; +static int i915_get_display_clock_speed(struct drm_device *dev) +{ + return 333000; +} - pci_read_config_word(dev->pdev, GCFGC, &gcfgc); +static int i9xx_misc_get_display_clock_speed(struct drm_device *dev) +{ + return 200000; +} - if (gcfgc & GC_LOW_FREQUENCY_ENABLE) - return 133000; - else { - switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { - case GC_DISPLAY_CLOCK_333_MHZ: - return 333000; - default: - case GC_DISPLAY_CLOCK_190_200_MHZ: - return 190000; - } - } - } else if (IS_I865G(dev)) - return 266000; - else if (IS_I855(dev)) { - u16 hpllcc = 0; - /* Assume that the hardware is in the high speed state. This - * should be the default. - */ - switch (hpllcc & GC_CLOCK_CONTROL_MASK) { - case GC_CLOCK_133_200: - case GC_CLOCK_100_200: - return 200000; - case GC_CLOCK_166_250: - return 250000; - case GC_CLOCK_100_133: - return 133000; - } - } else /* 852, 830 */ +static int i915gm_get_display_clock_speed(struct drm_device *dev) +{ + u16 gcfgc = 0; + + pci_read_config_word(dev->pdev, GCFGC, &gcfgc); + + if (gcfgc & GC_LOW_FREQUENCY_ENABLE) return 133000; + else { + switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { + case GC_DISPLAY_CLOCK_333_MHZ: + return 333000; + default: + case GC_DISPLAY_CLOCK_190_200_MHZ: + return 190000; + } + } +} - return 0; /* Silence gcc warning */ +static int i865_get_display_clock_speed(struct drm_device *dev) +{ + return 266000; +} + +static int i855_get_display_clock_speed(struct drm_device *dev) +{ + u16 hpllcc = 0; + /* Assume that the hardware is in the high speed state. This + * should be the default. 
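+	 * (hpllcc is left at its initializer here rather than read back
+	 * from the hardware, so the assumption above is what the switch
+	 * below actually evaluates.)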
+ */ + switch (hpllcc & GC_CLOCK_CONTROL_MASK) { + case GC_CLOCK_133_200: + case GC_CLOCK_100_200: + return 200000; + case GC_CLOCK_166_250: + return 250000; + case GC_CLOCK_100_133: + return 133000; + } + + /* Shouldn't happen */ + return 0; +} + +static int i830_get_display_clock_speed(struct drm_device *dev) +{ + return 133000; } /** @@ -1921,7 +2194,14 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, { long entries_required, wm_size; - entries_required = (clock_in_khz * pixel_size * latency_ns) / 1000000; + /* + * Note: we need to make sure we don't overflow for various clock & + * latency values. + * clocks go from a few thousand to several hundred thousand. + * latency is usually a few thousand + */ + entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / + 1000; entries_required /= wm->cacheline_size; DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required); @@ -1986,14 +2266,13 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { latency = &cxsr_latency_table[i]; if (is_desktop == latency->is_desktop && - fsb == latency->fsb_freq && mem == latency->mem_freq) - break; + fsb == latency->fsb_freq && mem == latency->mem_freq) + return latency; } - if (i >= ARRAY_SIZE(cxsr_latency_table)) { - DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); - return NULL; - } - return latency; + + DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); + + return NULL; } static void igd_disable_cxsr(struct drm_device *dev) @@ -2084,32 +2363,17 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock, */ const static int latency_ns = 5000; -static int intel_get_fifo_size(struct drm_device *dev, int plane) +static int i9xx_get_fifo_size(struct drm_device *dev, int plane) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t dsparb = I915_READ(DSPARB); int size; - if (IS_I9XX(dev)) { - if (plane == 0) - size = dsparb & 0x7f; - else - size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - - (dsparb & 0x7f); - } else if (IS_I85X(dev)) { - if (plane == 0) - size = dsparb & 0x1ff; - else - size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - - (dsparb & 0x1ff); - size >>= 1; /* Convert to cachelines */ - } else if (IS_845G(dev)) { + if (plane == 0) size = dsparb & 0x7f; - size >>= 2; /* Convert to cachelines */ - } else { - size = dsparb & 0x7f; - size >>= 1; /* Convert to cachelines */ - } + else + size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - + (dsparb & 0x7f); DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", size); @@ -2117,7 +2381,57 @@ static int intel_get_fifo_size(struct drm_device *dev, int plane) return size; } -static void g4x_update_wm(struct drm_device *dev) +static int i85x_get_fifo_size(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dsparb = I915_READ(DSPARB); + int size; + + if (plane == 0) + size = dsparb & 0x1ff; + else + size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - + (dsparb & 0x1ff); + size >>= 1; /* Convert to cachelines */ + + DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", + size); + + return size; +} + +static int i845_get_fifo_size(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dsparb = I915_READ(DSPARB); + int size; + + size = dsparb & 0x7f; + size >>= 2; /* Convert to cachelines */ + + DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? 
"B" : "A", + size); + + return size; +} + +static int i830_get_fifo_size(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dsparb = I915_READ(DSPARB); + int size; + + size = dsparb & 0x7f; + size >>= 1; /* Convert to cachelines */ + + DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", + size); + + return size; +} + +static void g4x_update_wm(struct drm_device *dev, int unused, int unused2, + int unused3, int unused4) { struct drm_i915_private *dev_priv = dev->dev_private; u32 fw_blc_self = I915_READ(FW_BLC_SELF); @@ -2129,7 +2443,8 @@ static void g4x_update_wm(struct drm_device *dev) I915_WRITE(FW_BLC_SELF, fw_blc_self); } -static void i965_update_wm(struct drm_device *dev) +static void i965_update_wm(struct drm_device *dev, int unused, int unused2, + int unused3, int unused4) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2165,8 +2480,8 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, cacheline_size = planea_params.cacheline_size; /* Update per-plane FIFO sizes */ - planea_params.fifo_size = intel_get_fifo_size(dev, 0); - planeb_params.fifo_size = intel_get_fifo_size(dev, 1); + planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0); + planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1); planea_wm = intel_calculate_wm(planea_clock, &planea_params, pixel_size, latency_ns); @@ -2213,14 +2528,14 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, I915_WRITE(FW_BLC2, fwater_hi); } -static void i830_update_wm(struct drm_device *dev, int planea_clock, - int pixel_size) +static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, + int unused2, int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; int planea_wm; - i830_wm_info.fifo_size = intel_get_fifo_size(dev, 0); + i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0); planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info, pixel_size, latency_ns); @@ -2264,6 +2579,7 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, */ static void intel_update_watermarks(struct drm_device *dev) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; struct intel_crtc *intel_crtc; int sr_hdisplay = 0; @@ -2302,15 +2618,8 @@ static void intel_update_watermarks(struct drm_device *dev) else if (IS_IGD(dev)) igd_disable_cxsr(dev); - if (IS_G4X(dev)) - g4x_update_wm(dev); - else if (IS_I965G(dev)) - i965_update_wm(dev); - else if (IS_I9XX(dev) || IS_MOBILE(dev)) - i9xx_update_wm(dev, planea_clock, planeb_clock, sr_hdisplay, - pixel_size); - else - i830_update_wm(dev, planea_clock, pixel_size); + dev_priv->display.update_wm(dev, planea_clock, planeb_clock, + sr_hdisplay, pixel_size); } static int intel_crtc_mode_set(struct drm_crtc *crtc, @@ -2323,10 +2632,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; int fp_reg = (pipe == 0) ? FPA0 : FPB0; int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD; - int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; + int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; int htot_reg = (pipe == 0) ? 
HTOTAL_A : HTOTAL_B; int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; @@ -2334,8 +2644,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; - int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; - int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; + int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; + int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; int refclk, num_outputs = 0; intel_clock_t clock, reduced_clock; @@ -2568,7 +2878,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, enable color space conversion */ if (!IS_IGDNG(dev)) { if (pipe == 0) - dspcntr |= DISPPLANE_SEL_PIPE_A; + dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; else dspcntr |= DISPPLANE_SEL_PIPE_B; } @@ -2580,7 +2890,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, * XXX: No double-wide on 915GM pipe B. Is that the only reason for the * pipe == 0 check? */ - if (mode->clock > intel_get_core_clock_speed(dev) * 9 / 10) + if (mode->clock > + dev_priv->display.get_display_clock_speed(dev) * 9 / 10) pipeconf |= PIPEACONF_DOUBLE_WIDE; else pipeconf &= ~PIPEACONF_DOUBLE_WIDE; @@ -2652,9 +2963,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, udelay(150); if (IS_I965G(dev) && !IS_IGDNG(dev)) { - sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; - I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | + if (is_sdvo) { + sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; + I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); + } else + I915_WRITE(dpll_md_reg, 0); } else { /* write it again -- the BIOS does, after all */ I915_WRITE(dpll_reg, dpll); @@ -2734,6 +3048,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* Flush the plane changes */ ret = intel_pipe_set_base(crtc, x, y, old_fb); + if ((IS_I965G(dev) || plane == 0)) + intel_update_fbc(crtc, &crtc->mode); + intel_update_watermarks(dev); drm_vblank_post_modeset(dev, pipe); @@ -2778,6 +3095,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, struct drm_gem_object *bo; struct drm_i915_gem_object *obj_priv; int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; uint32_t temp = I915_READ(control); @@ -2863,6 +3181,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, i915_gem_object_unpin(intel_crtc->cursor_bo); drm_gem_object_unreference(intel_crtc->cursor_bo); } + + if ((IS_I965G(dev) || plane == 0)) + intel_update_fbc(crtc, &crtc->mode); + mutex_unlock(&dev->struct_mutex); intel_crtc->cursor_addr = addr; @@ -3544,6 +3866,14 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) intel_crtc->lut_b[i] = i; } + /* Swap pipes & planes for FBC on pre-965 */ + intel_crtc->pipe = pipe; + intel_crtc->plane = pipe; + if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) { + DRM_DEBUG("swapping pipes & planes for FBC\n"); + intel_crtc->plane = ((pipe == 0) ? 
1 : 0); + } + intel_crtc->cursor_addr = 0; intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); @@ -3826,6 +4156,73 @@ void intel_init_clock_gating(struct drm_device *dev) } } +/* Set up chip specific display functions */ +static void intel_init_display(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* We always want a DPMS function */ + if (IS_IGDNG(dev)) + dev_priv->display.dpms = igdng_crtc_dpms; + else + dev_priv->display.dpms = i9xx_crtc_dpms; + + /* Only mobile has FBC, leave pointers NULL for other chips */ + if (IS_MOBILE(dev)) { + if (IS_GM45(dev)) { + dev_priv->display.fbc_enabled = g4x_fbc_enabled; + dev_priv->display.enable_fbc = g4x_enable_fbc; + dev_priv->display.disable_fbc = g4x_disable_fbc; + } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) { + dev_priv->display.fbc_enabled = i8xx_fbc_enabled; + dev_priv->display.enable_fbc = i8xx_enable_fbc; + dev_priv->display.disable_fbc = i8xx_disable_fbc; + } + /* 855GM needs testing */ + } + + /* Returns the core display clock speed */ + if (IS_I945G(dev)) + dev_priv->display.get_display_clock_speed = + i945_get_display_clock_speed; + else if (IS_I915G(dev)) + dev_priv->display.get_display_clock_speed = + i915_get_display_clock_speed; + else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev)) + dev_priv->display.get_display_clock_speed = + i9xx_misc_get_display_clock_speed; + else if (IS_I915GM(dev)) + dev_priv->display.get_display_clock_speed = + i915gm_get_display_clock_speed; + else if (IS_I865G(dev)) + dev_priv->display.get_display_clock_speed = + i865_get_display_clock_speed; + else if (IS_I855(dev)) + dev_priv->display.get_display_clock_speed = + i855_get_display_clock_speed; + else /* 852, 830 */ + dev_priv->display.get_display_clock_speed = + i830_get_display_clock_speed; + + /* For FIFO watermark updates */ + if (IS_G4X(dev)) + dev_priv->display.update_wm = g4x_update_wm; + else if (IS_I965G(dev)) + dev_priv->display.update_wm = i965_update_wm; + else if (IS_I9XX(dev) || IS_MOBILE(dev)) { + dev_priv->display.update_wm = i9xx_update_wm; + dev_priv->display.get_fifo_size = i9xx_get_fifo_size; + } else { + if (IS_I85X(dev)) + dev_priv->display.get_fifo_size = i85x_get_fifo_size; + else if (IS_845G(dev)) + dev_priv->display.get_fifo_size = i845_get_fifo_size; + else + dev_priv->display.get_fifo_size = i830_get_fifo_size; + dev_priv->display.update_wm = i830_update_wm; + } +} + void intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3839,6 +4236,8 @@ void intel_modeset_init(struct drm_device *dev) dev->mode_config.funcs = (void *)&intel_mode_funcs; + intel_init_display(dev); + if (IS_I965G(dev)) { dev->mode_config.max_width = 8192; dev->mode_config.max_height = 8192; @@ -3904,6 +4303,9 @@ void intel_modeset_cleanup(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); + if (dev_priv->display.disable_fbc) + dev_priv->display.disable_fbc(dev); + drm_mode_config_cleanup(dev); } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 3ebbbabfe59b..8aa4b7f30daa 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -28,6 +28,7 @@ #include #include #include +#include "i915_drv.h" #include "drm_crtc.h" #include "drm_crtc_helper.h" @@ -111,8 +112,8 @@ struct intel_output { struct intel_crtc { struct drm_crtc base; - int pipe; - int plane; + enum pipe pipe; + enum plane plane; struct drm_gem_object *cursor_bo; 
uint32_t cursor_addr; u8 lut_r[256], lut_g[256], lut_b[256]; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index dafc0da1c256..98ae3d73577e 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -27,6 +27,7 @@ * Jesse Barnes */ +#include <acpi/button.h> #include <linux/dmi.h> #include <linux/i2c.h> #include "drmP.h" @@ -295,6 +296,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, goto out; } + /* full screen scale for now */ + if (IS_IGDNG(dev)) + goto out; + /* 965+ wants fuzzy fitting */ if (IS_I965G(dev)) pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) | @@ -322,8 +327,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, * to register description and PRM. * Change the value here to see the borders for debugging */ - I915_WRITE(BCLRPAT_A, 0); - I915_WRITE(BCLRPAT_B, 0); + if (!IS_IGDNG(dev)) { + I915_WRITE(BCLRPAT_A, 0); + I915_WRITE(BCLRPAT_B, 0); + } switch (lvds_priv->fitting_mode) { case DRM_MODE_SCALE_CENTER: @@ -572,7 +579,6 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, * settings. */ - /* No panel fitting yet, fixme */ if (IS_IGDNG(dev)) return; @@ -585,15 +591,33 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); } +/* Some lid devices report incorrect lid status, assume they're connected */ +static const struct dmi_system_id bad_lid_status[] = { + { + .ident = "Aspire One", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), + }, + }, + { } +}; + /** * Detect the LVDS connection. * - * This always returns CONNECTOR_STATUS_CONNECTED. This connector should only have - * been set up if the LVDS was actually connected anyway. + * Since LVDS doesn't have hotplug, we use the lid as a proxy. Open means + * connected and closed means disconnected. We also send hotplug events as + * needed, using lid status notification from the input layer.
*/ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) { - return connector_status_connected; + enum drm_connector_status status = connector_status_connected; + + if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) + status = connector_status_disconnected; + + return status; } /** @@ -632,6 +656,24 @@ static int intel_lvds_get_modes(struct drm_connector *connector) return 0; } +static int intel_lid_notify(struct notifier_block *nb, unsigned long val, + void *unused) +{ + struct drm_i915_private *dev_priv = + container_of(nb, struct drm_i915_private, lid_notifier); + struct drm_device *dev = dev_priv->dev; + + if (acpi_lid_open() && !dev_priv->suspended) { + mutex_lock(&dev->mode_config.mutex); + drm_helper_resume_force_mode(dev); + mutex_unlock(&dev->mode_config.mutex); + } + + drm_sysfs_hotplug_event(dev_priv->dev); + + return NOTIFY_OK; +} + /** * intel_lvds_destroy - unregister and free LVDS structures * @connector: connector to free @@ -641,10 +683,14 @@ static int intel_lvds_get_modes(struct drm_connector *connector) */ static void intel_lvds_destroy(struct drm_connector *connector) { + struct drm_device *dev = connector->dev; struct intel_output *intel_output = to_intel_output(connector); + struct drm_i915_private *dev_priv = dev->dev_private; if (intel_output->ddc_bus) intel_i2c_destroy(intel_output->ddc_bus); + if (dev_priv->lid_notifier.notifier_call) + acpi_lid_notifier_unregister(&dev_priv->lid_notifier); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); @@ -1011,6 +1057,11 @@ out: pwm |= PWM_PCH_ENABLE; I915_WRITE(BLC_PWM_PCH_CTL1, pwm); } + dev_priv->lid_notifier.notifier_call = intel_lid_notify; + if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { + DRM_DEBUG("lid notifier registration failed\n"); + dev_priv->lid_notifier.notifier_call = NULL; + } drm_sysfs_connector_add(connector); return; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 0bf28efcf2c1..083bec2e50f9 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -135,6 +135,30 @@ struct intel_sdvo_priv { struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; struct intel_sdvo_dtd save_output_dtd[16]; u32 save_SDVOX; + /* margin and position properties for the SDVO-TV */ + struct drm_property *left_property; + struct drm_property *right_property; + struct drm_property *top_property; + struct drm_property *bottom_property; + struct drm_property *hpos_property; + struct drm_property *vpos_property; + + /* picture-enhancement properties for the SDVO-TV/LVDS */ + struct drm_property *brightness_property; + struct drm_property *contrast_property; + struct drm_property *saturation_property; + struct drm_property *hue_property; + + /* current settings for the properties above */ + u32 left_margin, right_margin, top_margin, bottom_margin; + /* overscan limits, i.e. the range of the margin properties */ + u32 max_hscan, max_vscan; + u32 max_hpos, cur_hpos; + u32 max_vpos, cur_vpos; + u32 cur_brightness, max_brightness; + u32 cur_contrast, max_contrast; + u32 cur_saturation, max_saturation; + u32 cur_hue, max_hue; }; static bool @@ -281,6 +305,31 @@ static const struct _sdvo_cmd_name { SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), + /* op codes for the SDVO enhancements */ + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_H), +
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_H), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_H), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_V), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_V), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_V), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V), /* HDMI op code */ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), @@ -981,7 +1030,7 @@ static void intel_sdvo_set_tv_format(struct intel_output *output) status = intel_sdvo_read_response(output, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) - DRM_DEBUG("%s: Failed to set TV format\n", + DRM_DEBUG_KMS("%s: Failed to set TV format\n", SDVO_NAME(sdvo_priv)); } @@ -1792,6 +1841,45 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) return 1; } +static +void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct drm_device *dev = connector->dev; + + if (sdvo_priv->is_tv) { + if (sdvo_priv->left_property) + drm_property_destroy(dev, sdvo_priv->left_property); + if (sdvo_priv->right_property) + drm_property_destroy(dev, sdvo_priv->right_property); + if (sdvo_priv->top_property) + drm_property_destroy(dev, sdvo_priv->top_property); + if (sdvo_priv->bottom_property) + drm_property_destroy(dev, sdvo_priv->bottom_property); + if (sdvo_priv->hpos_property) + drm_property_destroy(dev, sdvo_priv->hpos_property); + if (sdvo_priv->vpos_property) + drm_property_destroy(dev, sdvo_priv->vpos_property); + } + if (sdvo_priv->is_tv) { + if (sdvo_priv->saturation_property) + drm_property_destroy(dev, + sdvo_priv->saturation_property); + if (sdvo_priv->contrast_property) + drm_property_destroy(dev, + sdvo_priv->contrast_property); + if (sdvo_priv->hue_property) + drm_property_destroy(dev, sdvo_priv->hue_property); + } + if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { + if (sdvo_priv->brightness_property) + drm_property_destroy(dev, + sdvo_priv->brightness_property); + } + return; +} + static void intel_sdvo_destroy(struct drm_connector *connector) { struct intel_output *intel_output = to_intel_output(connector); @@ -1812,6 +1900,9 @@ static void intel_sdvo_destroy(struct drm_connector *connector) drm_property_destroy(connector->dev, sdvo_priv->tv_format_property); + if (sdvo_priv->is_tv || sdvo_priv->is_lvds) + intel_sdvo_destroy_enhance_property(connector); + drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); @@ -1829,6 +1920,8 @@ intel_sdvo_set_property(struct drm_connector *connector, struct drm_crtc *crtc = encoder->crtc; int ret = 0; bool changed = false; + uint8_t cmd, status; + uint16_t 
temp_value; ret = drm_connector_property_set_value(connector, property, val); if (ret < 0) @@ -1845,11 +1938,102 @@ intel_sdvo_set_property(struct drm_connector *connector, sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val]; changed = true; - } else { - ret = -EINVAL; - goto out; } + if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { + cmd = 0; + temp_value = val; + if (sdvo_priv->left_property == property) { + drm_connector_property_set_value(connector, + sdvo_priv->right_property, val); + if (sdvo_priv->left_margin == temp_value) + goto out; + + sdvo_priv->left_margin = temp_value; + sdvo_priv->right_margin = temp_value; + temp_value = sdvo_priv->max_hscan - + sdvo_priv->left_margin; + cmd = SDVO_CMD_SET_OVERSCAN_H; + } else if (sdvo_priv->right_property == property) { + drm_connector_property_set_value(connector, + sdvo_priv->left_property, val); + if (sdvo_priv->right_margin == temp_value) + goto out; + + sdvo_priv->left_margin = temp_value; + sdvo_priv->right_margin = temp_value; + temp_value = sdvo_priv->max_hscan - + sdvo_priv->left_margin; + cmd = SDVO_CMD_SET_OVERSCAN_H; + } else if (sdvo_priv->top_property == property) { + drm_connector_property_set_value(connector, + sdvo_priv->bottom_property, val); + if (sdvo_priv->top_margin == temp_value) + goto out; + + sdvo_priv->top_margin = temp_value; + sdvo_priv->bottom_margin = temp_value; + temp_value = sdvo_priv->max_vscan - + sdvo_priv->top_margin; + cmd = SDVO_CMD_SET_OVERSCAN_V; + } else if (sdvo_priv->bottom_property == property) { + drm_connector_property_set_value(connector, + sdvo_priv->top_property, val); + if (sdvo_priv->bottom_margin == temp_value) + goto out; + sdvo_priv->top_margin = temp_value; + sdvo_priv->bottom_margin = temp_value; + temp_value = sdvo_priv->max_vscan - + sdvo_priv->top_margin; + cmd = SDVO_CMD_SET_OVERSCAN_V; + } else if (sdvo_priv->hpos_property == property) { + if (sdvo_priv->cur_hpos == temp_value) + goto out; + + cmd = SDVO_CMD_SET_POSITION_H; + sdvo_priv->cur_hpos = temp_value; + } else if (sdvo_priv->vpos_property == property) { + if (sdvo_priv->cur_vpos == temp_value) + goto out; + + cmd = SDVO_CMD_SET_POSITION_V; + sdvo_priv->cur_vpos = temp_value; + } else if (sdvo_priv->saturation_property == property) { + if (sdvo_priv->cur_saturation == temp_value) + goto out; + + cmd = SDVO_CMD_SET_SATURATION; + sdvo_priv->cur_saturation = temp_value; + } else if (sdvo_priv->contrast_property == property) { + if (sdvo_priv->cur_contrast == temp_value) + goto out; + + cmd = SDVO_CMD_SET_CONTRAST; + sdvo_priv->cur_contrast = temp_value; + } else if (sdvo_priv->hue_property == property) { + if (sdvo_priv->cur_hue == temp_value) + goto out; + + cmd = SDVO_CMD_SET_HUE; + sdvo_priv->cur_hue = temp_value; + } else if (sdvo_priv->brightness_property == property) { + if (sdvo_priv->cur_brightness == temp_value) + goto out; + + cmd = SDVO_CMD_SET_BRIGHTNESS; + sdvo_priv->cur_brightness = temp_value; + } + if (cmd) { + intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2); + status = intel_sdvo_read_response(intel_output, + NULL, 0); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO command \n"); + return -EINVAL; + } + changed = true; + } + } if (changed && crtc) drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); @@ -2090,6 +2274,8 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; encoder->encoder_type = DRM_MODE_ENCODER_DAC; connector->connector_type = DRM_MODE_CONNECTOR_VGA; + 
intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | + (1 << INTEL_ANALOG_CLONE_BIT); } else if (flags & SDVO_OUTPUT_LVDS0) { sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; @@ -2176,6 +2362,310 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector) } +static void intel_sdvo_create_enhance_property(struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_sdvo_enhancements_reply sdvo_data; + struct drm_device *dev = connector->dev; + uint8_t status; + uint16_t response, data_value[2]; + + intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, + NULL, 0); + status = intel_sdvo_read_response(intel_output, &sdvo_data, + sizeof(sdvo_data)); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("incorrect response returned\n"); + return; + } + response = *((uint16_t *)&sdvo_data); + if (!response) { + DRM_DEBUG_KMS("No enhancement is supported\n"); + return; + } + if (sdvo_priv->is_tv) { + /* when horizontal overscan is supported, add the left/right + * properties + */ + if (sdvo_data.overscan_h) { + intel_sdvo_write_cmd(intel_output, + SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); + status = intel_sdvo_read_response(intel_output, + &data_value, 4); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO max " + "h_overscan\n"); + return; + } + intel_sdvo_write_cmd(intel_output, + SDVO_CMD_GET_OVERSCAN_H, NULL, 0); + status = intel_sdvo_read_response(intel_output, + &response, 2); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); + return; + } + sdvo_priv->max_hscan = data_value[0]; + sdvo_priv->left_margin = data_value[0] - response; + sdvo_priv->right_margin = sdvo_priv->left_margin; + sdvo_priv->left_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "left_margin", 2); + sdvo_priv->left_property->values[0] = 0; + sdvo_priv->left_property->values[1] = data_value[0]; + drm_connector_attach_property(connector, + sdvo_priv->left_property, + sdvo_priv->left_margin); + sdvo_priv->right_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "right_margin", 2); + sdvo_priv->right_property->values[0] = 0; + sdvo_priv->right_property->values[1] = data_value[0]; + drm_connector_attach_property(connector, + sdvo_priv->right_property, + sdvo_priv->right_margin); + DRM_DEBUG_KMS("h_overscan: max %d, " + "default %d, current %d\n", + data_value[0], data_value[1], response); + } + if (sdvo_data.overscan_v) { + intel_sdvo_write_cmd(intel_output, + SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); + status = intel_sdvo_read_response(intel_output, + &data_value, 4); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO max " + "v_overscan\n"); + return; + } + intel_sdvo_write_cmd(intel_output, + SDVO_CMD_GET_OVERSCAN_V, NULL, 0); + status = intel_sdvo_read_response(intel_output, + &response, 2); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); + return; + } + sdvo_priv->max_vscan = data_value[0]; + sdvo_priv->top_margin = data_value[0] - response; + sdvo_priv->bottom_margin = sdvo_priv->top_margin; + sdvo_priv->top_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "top_margin", 2); + sdvo_priv->top_property->values[0] = 0; + sdvo_priv->top_property->values[1] = data_value[0]; + drm_connector_attach_property(connector, + sdvo_priv->top_property, + sdvo_priv->top_margin); + sdvo_priv->bottom_property = +
drm_property_create(dev, DRM_MODE_PROP_RANGE, + "bottom_margin", 2); + sdvo_priv->bottom_property->values[0] = 0; + sdvo_priv->bottom_property->values[1] = data_value[0]; + drm_connector_attach_property(connector, + sdvo_priv->bottom_property, + sdvo_priv->bottom_margin); + DRM_DEBUG_KMS("v_overscan: max %d, " + "default %d, current %d\n", + data_value[0], data_value[1], response); + } + if (sdvo_data.position_h) { + intel_sdvo_write_cmd(intel_output, + SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); + status = intel_sdvo_read_response(intel_output, + &data_value, 4); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); + return; + } + intel_sdvo_write_cmd(intel_output, + SDVO_CMD_GET_POSITION_H, NULL, 0); + status = intel_sdvo_read_response(intel_output, + &response, 2); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO get h_position\n"); + return; + } + sdvo_priv->max_hpos = data_value[0]; + sdvo_priv->cur_hpos = response; + sdvo_priv->hpos_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "hpos", 2); + sdvo_priv->hpos_property->values[0] = 0; + sdvo_priv->hpos_property->values[1] = data_value[0]; + drm_connector_attach_property(connector, + sdvo_priv->hpos_property, + sdvo_priv->cur_hpos); + DRM_DEBUG_KMS("h_position: max %d, " + "default %d, current %d\n", + data_value[0], data_value[1], response); + } + if (sdvo_data.position_v) { + intel_sdvo_write_cmd(intel_output, + SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); + status = intel_sdvo_read_response(intel_output, + &data_value, 4); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); + return; + } + intel_sdvo_write_cmd(intel_output, + SDVO_CMD_GET_POSITION_V, NULL, 0); + status = intel_sdvo_read_response(intel_output, + &response, 2); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO get v_position\n"); + return; + } + sdvo_priv->max_vpos = data_value[0]; + sdvo_priv->cur_vpos = response; + sdvo_priv->vpos_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "vpos", 2); + sdvo_priv->vpos_property->values[0] = 0; + sdvo_priv->vpos_property->values[1] = data_value[0]; + drm_connector_attach_property(connector, + sdvo_priv->vpos_property, + sdvo_priv->cur_vpos); + DRM_DEBUG_KMS("v_position: max %d, " + "default %d, current %d\n", + data_value[0], data_value[1], response); + } + } + if (sdvo_priv->is_tv) { + if (sdvo_data.saturation) { + intel_sdvo_write_cmd(intel_output, + SDVO_CMD_GET_MAX_SATURATION, NULL, 0); + status = intel_sdvo_read_response(intel_output, + &data_value, 4); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); + return; + } + intel_sdvo_write_cmd(intel_output, + SDVO_CMD_GET_SATURATION, NULL, 0); + status = intel_sdvo_read_response(intel_output, + &response, 2); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); + return; + } + sdvo_priv->max_saturation = data_value[0]; + sdvo_priv->cur_saturation = response; + sdvo_priv->saturation_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "saturation", 2); + sdvo_priv->saturation_property->values[0] = 0; + sdvo_priv->saturation_property->values[1] = + data_value[0]; + drm_connector_attach_property(connector, + sdvo_priv->saturation_property, + sdvo_priv->cur_saturation); + DRM_DEBUG_KMS("saturation: max %d, " + "default %d, current %d\n", + data_value[0], data_value[1], response); + } + if (sdvo_data.contrast) { + intel_sdvo_write_cmd(intel_output, +
SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); + status = intel_sdvo_read_response(intel_output, + &data_value, 4); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); + return; + } + intel_sdvo_write_cmd(intel_output, + SDVO_CMD_GET_CONTRAST, NULL, 0); + status = intel_sdvo_read_response(intel_output, + &response, 2); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); + return; + } + sdvo_priv->max_contrast = data_value[0]; + sdvo_priv->cur_contrast = response; + sdvo_priv->contrast_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "contrast", 2); + sdvo_priv->contrast_property->values[0] = 0; + sdvo_priv->contrast_property->values[1] = data_value[0]; + drm_connector_attach_property(connector, + sdvo_priv->contrast_property, + sdvo_priv->cur_contrast); + DRM_DEBUG_KMS("contrast: max %d, " + "default %d, current %d\n", + data_value[0], data_value[1], response); + } + if (sdvo_data.hue) { + intel_sdvo_write_cmd(intel_output, + SDVO_CMD_GET_MAX_HUE, NULL, 0); + status = intel_sdvo_read_response(intel_output, + &data_value, 4); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); + return; + } + intel_sdvo_write_cmd(intel_output, + SDVO_CMD_GET_HUE, NULL, 0); + status = intel_sdvo_read_response(intel_output, + &response, 2); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); + return; + } + sdvo_priv->max_hue = data_value[0]; + sdvo_priv->cur_hue = response; + sdvo_priv->hue_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "hue", 2); + sdvo_priv->hue_property->values[0] = 0; + sdvo_priv->hue_property->values[1] = + data_value[0]; + drm_connector_attach_property(connector, + sdvo_priv->hue_property, + sdvo_priv->cur_hue); + DRM_DEBUG_KMS("hue: max %d, default %d, current %d\n", + data_value[0], data_value[1], response); + } + } + if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { + if (sdvo_data.brightness) { + intel_sdvo_write_cmd(intel_output, + SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); + status = intel_sdvo_read_response(intel_output, + &data_value, 4); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO Max brightness\n"); + return; + } + intel_sdvo_write_cmd(intel_output, + SDVO_CMD_GET_BRIGHTNESS, NULL, 0); + status = intel_sdvo_read_response(intel_output, + &response, 2); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO get brightness\n"); + return; + } + sdvo_priv->max_brightness = data_value[0]; + sdvo_priv->cur_brightness = response; + sdvo_priv->brightness_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "brightness", 2); + sdvo_priv->brightness_property->values[0] = 0; + sdvo_priv->brightness_property->values[1] = + data_value[0]; + drm_connector_attach_property(connector, + sdvo_priv->brightness_property, + sdvo_priv->cur_brightness); + DRM_DEBUG_KMS("brightness: max %d, " + "default %d, current %d\n", + data_value[0], data_value[1], response); + } + } + return; +} + bool intel_sdvo_init(struct drm_device *dev, int output_device) { struct drm_connector *connector; @@ -2264,6 +2754,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); if (sdvo_priv->is_tv) intel_sdvo_tv_create_property(connector); + + if (sdvo_priv->is_tv || sdvo_priv->is_lvds) + intel_sdvo_create_enhance_property(connector); + drm_sysfs_connector_add(connector); intel_sdvo_select_ddc_bus(sdvo_priv);
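For illustration (not part of the patch): the enhancement controls above are ordinary DRM range properties, so existing userspace can drive them through libdrm's connector-property API with no i915-specific code. A minimal sketch, assuming a connector id already obtained via drmModeGetResources()/drmModeGetConnector(), and using the property names created above ("brightness", "left_margin", ...); set_sdvo_prop itself is a hypothetical helper:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Look up a named connector property and set it, range-checking the
 * value against values[1] (a PROP_RANGE advertises [min, max] there,
 * matching the values[0] = 0 / values[1] = max setup in the patch).
 */
static int set_sdvo_prop(int fd, uint32_t connector_id,
			 const char *name, uint64_t value)
{
	drmModeConnector *conn = drmModeGetConnector(fd, connector_id);
	int i, ret = -1;

	if (!conn)
		return -1;
	for (i = 0; i < conn->count_props; i++) {
		drmModePropertyRes *prop =
			drmModeGetProperty(fd, conn->props[i]);

		if (!prop)
			continue;
		if (!strcmp(prop->name, name) && value <= prop->values[1])
			ret = drmModeConnectorSetProperty(fd, connector_id,
							  prop->prop_id,
							  value);
		drmModeFreeProperty(prop);
	}
	drmModeFreeConnector(conn);
	return ret;
}

A successful set lands in intel_sdvo_set_property() above, which forwards the value to the encoder and forces a mode set when a margin change requires one.
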
diff --git a/include/acpi/button.h b/include/acpi/button.h new file mode 100644 index 000000000000..97eea0e4c016 --- /dev/null +++ b/include/acpi/button.h @@ -0,0 +1,25 @@ +#ifndef ACPI_BUTTON_H +#define ACPI_BUTTON_H + +#include <linux/notifier.h> + +#if defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) +extern int acpi_lid_notifier_register(struct notifier_block *nb); +extern int acpi_lid_notifier_unregister(struct notifier_block *nb); +extern int acpi_lid_open(void); +#else +static inline int acpi_lid_notifier_register(struct notifier_block *nb) +{ + return 0; +} +static inline int acpi_lid_notifier_unregister(struct notifier_block *nb) +{ + return 0; +} +static inline int acpi_lid_open(void) +{ + return 1; +} +#endif /* defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) */ + +#endif /* ACPI_BUTTON_H */ diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 853508499d20..3f6e545609be 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -552,6 +552,7 @@ {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0x2e32, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ + {0x8086, 0x2e42, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 8e1e92583fbc..7e0cb1da92e6 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -185,6 +185,7 @@ typedef struct _drm_i915_sarea { #define DRM_I915_GEM_GET_APERTURE 0x23 #define DRM_I915_GEM_MMAP_GTT 0x24 #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 +#define DRM_I915_GEM_MADVISE 0x26 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) @@ -221,6 +222,7 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_intel_get_pipe_from_crtc_id) +#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) /* Allow drivers to submit batchbuffers directly to hardware, relying * on the security mechanisms provided by hardware. @@ -667,4 +669,21 @@ struct drm_i915_get_pipe_from_crtc_id { __u32 pipe; }; +#define I915_MADV_WILLNEED 0 +#define I915_MADV_DONTNEED 1 +#define __I915_MADV_PURGED 2 /* internal state */ + +struct drm_i915_gem_madvise { + /** Handle of the buffer to change the backing store advice */ + __u32 handle; + + /* Advice: either the buffer will be needed again in the near future, + * or won't be and could be discarded under memory pressure. + */ + __u32 madv; + + /** Whether the backing store still exists. */ + __u32 retained; +}; + #endif /* _I915_DRM_H_ */
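For illustration (not part of the patch): the madvise ioctl defined above is aimed at userspace buffer caches. A minimal sketch of the expected calling pattern, assuming an already-open DRM fd and an existing GEM handle; gem_madvise is a hypothetical helper and the include path may vary with the libdrm installation:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* the header patched above */

/* Returns <0 on ioctl error, otherwise `retained': 0 means the backing
 * pages were purged while marked DONTNEED and the contents are lost.
 */
static int gem_madvise(int fd, uint32_t handle, uint32_t advice)
{
	struct drm_i915_gem_madvise madv;

	memset(&madv, 0, sizeof(madv));
	madv.handle = handle;
	madv.madv = advice;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
		return -errno;
	return madv.retained;
}

A cache would call gem_madvise(fd, handle, I915_MADV_DONTNEED) when a buffer goes idle and gem_madvise(fd, handle, I915_MADV_WILLNEED) before reusing it; a zero return from the latter means the object's pages were reclaimed and it must be repopulated from scratch.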