commit 1ec14ad313
parent 340479aac6

drm/i915: Implement GPU semaphores for inter-ring synchronisation on SNB

The bulk of the change is to convert the growing list of rings into an
array so that the relationship between the rings and the semaphore sync
registers can be easily computed.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
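Reader's note (not part of the commit): the message's claim -- that an array makes the ring/sync-register relationship computable -- reduces to index arithmetic. A minimal hypothetical sketch, using the names the diff introduces below (RCS/VCS/BCS, I915_NUM_RINGS, RING_SYNC_0/1); the sync_reg() helper itself is illustrative only:

	enum intel_ring_id { RCS = 0, VCS = 1, BCS = 2, I915_NUM_RINGS };

	#define RING_SYNC_0(base) ((base) + 0x40)	/* offsets per i915_reg.h below */
	#define RING_SYNC_1(base) ((base) + 0x44)

	/* Sync register a signalling ring writes for the ring that owns
	 * waiter_mmio_base: slot 0 -> RING_SYNC_0, slot 1 -> RING_SYNC_1,
	 * matching the "RING_SYNC_0(...) + 4*i" computation in
	 * update_semaphore() at the end of the diff. */
	static inline unsigned int sync_reg(unsigned int waiter_mmio_base, int slot)
	{
		return RING_SYNC_0(waiter_mmio_base) + 4 * slot;
	}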
drivers/gpu/drm/i915/i915_debugfs.c
@@ -339,10 +339,10 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 		return ret;
 
 	count = 0;
-	if (!list_empty(&dev_priv->render_ring.request_list)) {
+	if (!list_empty(&dev_priv->ring[RCS].request_list)) {
 		seq_printf(m, "Render requests:\n");
 		list_for_each_entry(gem_request,
-				    &dev_priv->render_ring.request_list,
+				    &dev_priv->ring[RCS].request_list,
 				    list) {
 			seq_printf(m, " %d @ %d\n",
 				   gem_request->seqno,
@@ -350,10 +350,10 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 		}
 		count++;
 	}
-	if (!list_empty(&dev_priv->bsd_ring.request_list)) {
+	if (!list_empty(&dev_priv->ring[VCS].request_list)) {
 		seq_printf(m, "BSD requests:\n");
 		list_for_each_entry(gem_request,
-				    &dev_priv->bsd_ring.request_list,
+				    &dev_priv->ring[VCS].request_list,
 				    list) {
 			seq_printf(m, " %d @ %d\n",
 				   gem_request->seqno,
@@ -361,10 +361,10 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 		}
 		count++;
 	}
-	if (!list_empty(&dev_priv->blt_ring.request_list)) {
+	if (!list_empty(&dev_priv->ring[BCS].request_list)) {
 		seq_printf(m, "BLT requests:\n");
 		list_for_each_entry(gem_request,
-				    &dev_priv->blt_ring.request_list,
+				    &dev_priv->ring[BCS].request_list,
 				    list) {
 			seq_printf(m, " %d @ %d\n",
 				   gem_request->seqno,
@@ -398,15 +398,14 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	i915_ring_seqno_info(m, &dev_priv->render_ring);
-	i915_ring_seqno_info(m, &dev_priv->bsd_ring);
-	i915_ring_seqno_info(m, &dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		i915_ring_seqno_info(m, &dev_priv->ring[i]);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -419,7 +418,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -458,9 +457,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	}
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
-	i915_ring_seqno_info(m, &dev_priv->render_ring);
-	i915_ring_seqno_info(m, &dev_priv->bsd_ring);
-	i915_ring_seqno_info(m, &dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		i915_ring_seqno_info(m, &dev_priv->ring[i]);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -503,13 +501,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	volatile u32 *hws;
 	int i;
 
-	switch ((uintptr_t)node->info_ent->data) {
-	case RING_RENDER: ring = &dev_priv->render_ring; break;
-	case RING_BSD: ring = &dev_priv->bsd_ring; break;
-	case RING_BLT: ring = &dev_priv->blt_ring; break;
-	default: return -EINVAL;
-	}
-
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
 	hws = (volatile u32 *)ring->status_page.page_addr;
 	if (hws == NULL)
 		return 0;
@@ -569,17 +561,11 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
 	struct intel_ring_buffer *ring;
 	int ret;
 
-	switch ((uintptr_t)node->info_ent->data) {
-	case RING_RENDER: ring = &dev_priv->render_ring; break;
-	case RING_BSD: ring = &dev_priv->bsd_ring; break;
-	case RING_BLT: ring = &dev_priv->blt_ring; break;
-	default: return -EINVAL;
-	}
-
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
 	if (!ring->obj) {
 		seq_printf(m, "No ringbuffer setup\n");
 	} else {
@@ -603,21 +589,20 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
 
-	switch ((uintptr_t)node->info_ent->data) {
-	case RING_RENDER: ring = &dev_priv->render_ring; break;
-	case RING_BSD: ring = &dev_priv->bsd_ring; break;
-	case RING_BLT: ring = &dev_priv->blt_ring; break;
-	default: return -EINVAL;
-	}
-
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
 	if (ring->size == 0)
-	    return 0;
+		return 0;
 
 	seq_printf(m, "Ring %s:\n", ring->name);
 	seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
 	seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
 	seq_printf(m, " Size : %08x\n", ring->size);
 	seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
 	seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
+	if (IS_GEN6(dev)) {
+		seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
+		seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
+	}
 	seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
 	seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
 
@@ -1177,15 +1162,15 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
 	{"i915_gem_interrupt", i915_interrupt_info, 0},
-	{"i915_gem_hws", i915_hws_info, 0, (void *)RING_RENDER},
-	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)RING_BLT},
-	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)RING_BSD},
-	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_RENDER},
-	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_RENDER},
-	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_BSD},
-	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_BSD},
-	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_BLT},
-	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_BLT},
+	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
+	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
+	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
+	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
+	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
+	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
+	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
+	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
+	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
 	{"i915_batchbuffers", i915_batchbuffer_info, 0},
 	{"i915_error_state", i915_error_state, 0},
 	{"i915_rstdby_delays", i915_rstdby_delays, 0},
drivers/gpu/drm/i915/i915_dma.c
@@ -49,6 +49,8 @@
 static int i915_init_phys_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
 	/* Program Hardware Status Page */
 	dev_priv->status_page_dmah =
 		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
@@ -57,11 +59,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
 		DRM_ERROR("Can not allocate hardware status page\n");
 		return -ENOMEM;
 	}
-	dev_priv->render_ring.status_page.page_addr
-		= dev_priv->status_page_dmah->vaddr;
+	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
 	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
 
-	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
+	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
 	if (INTEL_INFO(dev)->gen >= 4)
 		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -79,13 +80,15 @@ static int i915_init_phys_hws(struct drm_device *dev)
 static void i915_free_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
 	if (dev_priv->status_page_dmah) {
 		drm_pci_free(dev, dev_priv->status_page_dmah);
 		dev_priv->status_page_dmah = NULL;
 	}
 
-	if (dev_priv->render_ring.status_page.gfx_addr) {
-		dev_priv->render_ring.status_page.gfx_addr = 0;
+	if (ring->status_page.gfx_addr) {
+		ring->status_page.gfx_addr = 0;
 		drm_core_ioremapfree(&dev_priv->hws_map, dev);
 	}
 
@@ -97,7 +100,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
-	struct intel_ring_buffer *ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	/*
 	 * We should never lose context on the ring with modesetting
@@ -123,6 +126,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
 static int i915_dma_cleanup(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
+
 	/* Make sure interrupts are disabled here because the uninstall ioctl
 	 * may not have been called from userspace and after dev_private
 	 * is freed, it's too late.
@@ -131,9 +136,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
 		drm_irq_uninstall(dev);
 
 	mutex_lock(&dev->struct_mutex);
-	intel_cleanup_ring_buffer(&dev_priv->render_ring);
-	intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
-	intel_cleanup_ring_buffer(&dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
 	mutex_unlock(&dev->struct_mutex);
 
 	/* Clear the HWS virtual address at teardown */
@@ -147,6 +151,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	master_priv->sarea = drm_getsarea(dev);
 	if (master_priv->sarea) {
@@ -157,24 +162,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	}
 
 	if (init->ring_size != 0) {
-		if (dev_priv->render_ring.obj != NULL) {
+		if (ring->obj != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
 			return -EINVAL;
 		}
 
-		dev_priv->render_ring.size = init->ring_size;
+		ring->size = init->ring_size;
 
-		dev_priv->render_ring.map.offset = init->ring_start;
-		dev_priv->render_ring.map.size = init->ring_size;
-		dev_priv->render_ring.map.type = 0;
-		dev_priv->render_ring.map.flags = 0;
-		dev_priv->render_ring.map.mtrr = 0;
+		ring->map.offset = init->ring_start;
+		ring->map.size = init->ring_size;
+		ring->map.type = 0;
+		ring->map.flags = 0;
+		ring->map.mtrr = 0;
 
-		drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
+		drm_core_ioremap_wc(&ring->map, dev);
 
-		if (dev_priv->render_ring.map.handle == NULL) {
+		if (ring->map.handle == NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("can not ioremap virtual address for"
 				  " ring buffer\n");
@@ -182,7 +187,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 		}
 	}
 
-	dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
+	ring->virtual_start = ring->map.handle;
 
 	dev_priv->cpp = init->cpp;
 	dev_priv->back_offset = init->back_offset;
@@ -201,12 +206,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 static int i915_dma_resume(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
-	struct intel_ring_buffer *ring;
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 
-	ring = &dev_priv->render_ring;
-
 	if (ring->map.handle == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
@@ -326,7 +329,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i, ret;
 
-	if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
+	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
 		return -EINVAL;
 
 	for (i = 0; i < dwords;) {
@@ -565,13 +568,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
 	return 0;
 }
 
-static int i915_quiescent(struct drm_device * dev)
+static int i915_quiescent(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
 
 	i915_kernel_lost_context(dev);
-	return intel_wait_ring_buffer(&dev_priv->render_ring,
-				      dev_priv->render_ring.size - 8);
+	return intel_wait_ring_buffer(ring, ring->size - 8);
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -828,7 +830,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_hws_addr_t *hws = data;
-	struct intel_ring_buffer *ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	if (!I915_NEED_GFX_HWS(dev))
 		return -EINVAL;
@@ -1978,7 +1980,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (!IS_I945G(dev) && !IS_I945GM(dev))
 		pci_enable_msi(dev->pdev);
 
-	spin_lock_init(&dev_priv->user_irq_lock);
+	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
 	dev_priv->trace_irq_seqno = 0;
 
drivers/gpu/drm/i915/i915_drv.c
@@ -487,11 +487,11 @@ int i915_reset(struct drm_device *dev, u8 flags)
 			!dev_priv->mm.suspended) {
 		dev_priv->mm.suspended = 0;
 
-		dev_priv->render_ring.init(&dev_priv->render_ring);
+		dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
 		if (HAS_BSD(dev))
-			dev_priv->bsd_ring.init(&dev_priv->bsd_ring);
+			dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
 		if (HAS_BLT(dev))
-			dev_priv->blt_ring.init(&dev_priv->blt_ring);
+			dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
 
 		mutex_unlock(&dev->struct_mutex);
 		drm_irq_uninstall(dev);
drivers/gpu/drm/i915/i915_drv.h
@@ -269,9 +269,7 @@ typedef struct drm_i915_private {
 	} *gmbus;
 
 	struct pci_dev *bridge_dev;
-	struct intel_ring_buffer render_ring;
-	struct intel_ring_buffer bsd_ring;
-	struct intel_ring_buffer blt_ring;
+	struct intel_ring_buffer ring[I915_NUM_RINGS];
 	uint32_t next_seqno;
 
 	drm_dma_handle_t *status_page_dmah;
@@ -290,19 +288,15 @@ typedef struct drm_i915_private {
 	int page_flipping;
 
 	atomic_t irq_received;
-	/** Protects user_irq_refcount and irq_mask_reg */
-	spinlock_t user_irq_lock;
 	u32 trace_irq_seqno;
+
+	/* protects the irq masks */
+	spinlock_t irq_lock;
 	/** Cached value of IMR to avoid reads in updating the bitfield */
-	u32 irq_mask_reg;
 	u32 pipestat[2];
-	/** splitted irq regs for graphics and display engine on Ironlake,
-	    irq_mask_reg is still used for display irq. */
-	u32 gt_irq_mask_reg;
-	u32 gt_irq_enable_reg;
-	u32 de_irq_enable_reg;
-	u32 pch_irq_mask_reg;
-	u32 pch_irq_enable_reg;
+	u32 irq_mask;
+	u32 gt_irq_mask;
+	u32 pch_irq_mask;
 
 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
@@ -1104,7 +1098,8 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 						bool interruptible);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-				    struct intel_ring_buffer *ring);
+				    struct intel_ring_buffer *ring,
+				    u32 seqno);
 
 /**
  * Returns true if seq1 is later than seq2.
@@ -1272,6 +1267,17 @@ extern void intel_display_print_error_state(struct seq_file *m,
 					    struct intel_display_error_state *error);
 #endif
 
+#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
+
+#define BEGIN_LP_RING(n) \
+	intel_ring_begin(LP_RING(dev_priv), (n))
+
+#define OUT_RING(x) \
+	intel_ring_emit(LP_RING(dev_priv), x)
+
+#define ADVANCE_LP_RING() \
+	intel_ring_advance(LP_RING(dev_priv))
+
 /**
  * Lock test for when it's just for synchronization of ring access.
  *
@@ -1279,8 +1285,7 @@ extern void intel_display_print_error_state(struct seq_file *m,
  * has access to the ring.
  */
 #define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
-	if (((drm_i915_private_t *)dev->dev_private)->render_ring.obj	\
-	    == NULL)							\
+	if (LP_RING(dev->dev_private)->obj == NULL)			\
 		LOCK_TEST_WITH_RETURN(dev, file);			\
 } while (0)
 
@@ -1366,15 +1371,6 @@ i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
 	}
 }
 
-#define BEGIN_LP_RING(n) \
-	intel_ring_begin(&dev_priv->render_ring, (n))
-
-#define OUT_RING(x) \
-	intel_ring_emit(&dev_priv->render_ring, x)
-
-#define ADVANCE_LP_RING() \
-	intel_ring_advance(&dev_priv->render_ring)
-
 /**
  * Reads a dword out of the status page, which is written to from the command
  * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -1391,7 +1387,7 @@ i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
 #define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
-			(dev_priv->render_ring.status_page.page_addr))[reg])
+			(LP_RING(dev_priv)->status_page.page_addr))[reg])
 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
 #define I915_GEM_HWS_INDEX		0x20
 #define I915_BREADCRUMB_INDEX		0x21
drivers/gpu/drm/i915/i915_gem.c
@@ -1561,11 +1561,11 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 
 void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-			       struct intel_ring_buffer *ring)
+			       struct intel_ring_buffer *ring,
+			       u32 seqno)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
 
 	BUG_ON(ring == NULL);
 	obj->ring = ring;
@@ -1679,7 +1679,8 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 
 		obj->base.write_domain = 0;
 		list_del_init(&obj->gpu_write_list);
-		i915_gem_object_move_to_active(obj, ring);
+		i915_gem_object_move_to_active(obj, ring,
+					       i915_gem_next_request_seqno(dev, ring));
 
 		trace_i915_gem_object_change_domain(obj,
 						    obj->base.read_domains,
@@ -1804,10 +1805,10 @@ void i915_gem_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
+	int i;
 
-	i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
-	i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
-	i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
 
 	/* Remove anything from the flushing lists. The GPU cache is likely
 	 * to be lost on reset along with the data, so simply move the
@@ -1846,6 +1847,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
+	int i;
 
 	if (!ring->status_page.page_addr ||
 	    list_empty(&ring->request_list))
@@ -1854,6 +1856,11 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 	WARN_ON(i915_verify_lists(dev));
 
 	seqno = ring->get_seqno(ring);
+
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		if (seqno >= ring->sync_seqno[i])
+			ring->sync_seqno[i] = 0;
+
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -1892,7 +1899,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 
 	if (unlikely (dev_priv->trace_irq_seqno &&
 		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-		ring->user_irq_put(ring);
+		ring->irq_put(ring);
 		dev_priv->trace_irq_seqno = 0;
 	}
 
@@ -1903,6 +1910,7 @@ void
 i915_gem_retire_requests(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
 
 	if (!list_empty(&dev_priv->mm.deferred_free_list)) {
 		struct drm_i915_gem_object *obj, *next;
@@ -1918,9 +1926,8 @@ i915_gem_retire_requests(struct drm_device *dev)
 			i915_gem_free_object_tail(obj);
 	}
 
-	i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
-	i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
-	i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
 }
 
 static void
@@ -1942,9 +1949,9 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	i915_gem_retire_requests(dev);
 
 	if (!dev_priv->mm.suspended &&
-	    (!list_empty(&dev_priv->render_ring.request_list) ||
-	     !list_empty(&dev_priv->bsd_ring.request_list) ||
-	     !list_empty(&dev_priv->blt_ring.request_list)))
+	    (!list_empty(&dev_priv->ring[RCS].request_list) ||
+	     !list_empty(&dev_priv->ring[VCS].request_list) ||
+	     !list_empty(&dev_priv->ring[BCS].request_list)))
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 	mutex_unlock(&dev->struct_mutex);
 }
@@ -1993,7 +2000,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 		trace_i915_gem_request_wait_begin(dev, seqno);
 
 		ring->waiting_seqno = seqno;
-		ring->user_irq_get(ring);
+		ring->irq_get(ring);
 		if (interruptible)
 			ret = wait_event_interruptible(ring->irq_queue,
 				i915_seqno_passed(ring->get_seqno(ring), seqno)
@@ -2003,7 +2010,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 				i915_seqno_passed(ring->get_seqno(ring), seqno)
 				|| atomic_read(&dev_priv->mm.wedged));
 
-		ring->user_irq_put(ring);
+		ring->irq_put(ring);
 		ring->waiting_seqno = 0;
 
 		trace_i915_gem_request_wait_end(dev, seqno);
@@ -2159,7 +2166,7 @@ i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool lists_empty;
-	int ret;
+	int ret, i;
 
 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
 		       list_empty(&dev_priv->mm.active_list));
@@ -2167,17 +2174,11 @@ i915_gpu_idle(struct drm_device *dev)
 		return 0;
 
 	/* Flush everything onto the inactive list. */
-	ret = i915_ring_idle(dev, &dev_priv->render_ring);
-	if (ret)
-		return ret;
-
-	ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
-	if (ret)
-		return ret;
-
-	ret = i915_ring_idle(dev, &dev_priv->blt_ring);
-	if (ret)
-		return ret;
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		ret = i915_ring_idle(dev, &dev_priv->ring[i]);
+		if (ret)
+			return ret;
+	}
 
 	return 0;
 }
@@ -3153,11 +3154,11 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	 * generation is designed to be run atomically and so is
 	 * lockless.
 	 */
-	ring->user_irq_get(ring);
+	ring->irq_get(ring);
 	ret = wait_event_interruptible(ring->irq_queue,
 				       i915_seqno_passed(ring->get_seqno(ring), seqno)
 				       || atomic_read(&dev_priv->mm.wedged));
-	ring->user_irq_put(ring);
+	ring->irq_put(ring);
 
 	if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
 		ret = -EIO;
@@ -3584,9 +3585,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	return 0;
 
 cleanup_bsd_ring:
-	intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
+	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
-	intel_cleanup_ring_buffer(&dev_priv->render_ring);
+	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
 	return ret;
 }
 
@@ -3594,10 +3595,10 @@ void
 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
 
-	intel_cleanup_ring_buffer(&dev_priv->render_ring);
-	intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
-	intel_cleanup_ring_buffer(&dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
 }
 
 int
@@ -3605,7 +3606,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return 0;
@@ -3625,14 +3626,12 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	}
 
 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
-	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
-	BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
-	BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
-	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
-	BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
-	BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
+		BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
+	}
 	mutex_unlock(&dev->struct_mutex);
 
 	ret = drm_irq_install(dev);
@@ -3695,9 +3694,8 @@ i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
 	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
-	init_ring_lists(&dev_priv->render_ring);
-	init_ring_lists(&dev_priv->bsd_ring);
-	init_ring_lists(&dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		init_ring_lists(&dev_priv->ring[i]);
 	for (i = 0; i < 16; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -632,23 +632,59 @@ i915_gem_execbuffer_flush
 			  uint32_t flush_rings)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
 
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		intel_gtt_chipset_flush();
 
 	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
-		if (flush_rings & RING_RENDER)
-			i915_gem_flush_ring(dev, &dev_priv->render_ring,
-					    invalidate_domains, flush_domains);
-		if (flush_rings & RING_BSD)
-			i915_gem_flush_ring(dev, &dev_priv->bsd_ring,
-					    invalidate_domains, flush_domains);
-		if (flush_rings & RING_BLT)
-			i915_gem_flush_ring(dev, &dev_priv->blt_ring,
-					    invalidate_domains, flush_domains);
+		for (i = 0; i < I915_NUM_RINGS; i++)
+			if (flush_rings & (1 << i))
+				i915_gem_flush_ring(dev, &dev_priv->ring[i],
+						    invalidate_domains,
+						    flush_domains);
 	}
 }
 
+static int
+i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
+			       struct intel_ring_buffer *to)
+{
+	struct intel_ring_buffer *from = obj->ring;
+	u32 seqno;
+	int ret, idx;
+
+	if (from == NULL || to == from)
+		return 0;
+
+	if (INTEL_INFO(obj->base.dev)->gen < 6)
+		return i915_gem_object_wait_rendering(obj, true);
+
+	idx = intel_ring_sync_index(from, to);
+
+	seqno = obj->last_rendering_seqno;
+	if (seqno <= from->sync_seqno[idx])
+		return 0;
+
+	if (seqno == from->outstanding_lazy_request) {
+		struct drm_i915_gem_request *request;
+
+		request = kzalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
+
+		ret = i915_add_request(obj->base.dev, NULL, request, from);
+		if (ret) {
+			kfree(request);
+			return ret;
+		}
+
+		seqno = request->seqno;
+	}
+
+	from->sync_seqno[idx] = seqno;
+	return intel_ring_sync(to, from, seqno - 1);
+}
+
 static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
@@ -678,12 +714,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 	}
 
 	list_for_each_entry(obj, objects, exec_list) {
-		/* XXX replace with semaphores */
-		if (obj->ring && ring != obj->ring) {
-			ret = i915_gem_object_wait_rendering(obj, true);
-			if (ret)
-				return ret;
-		}
+		ret = i915_gem_execbuffer_sync_rings(obj, ring);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
@@ -769,7 +802,8 @@ i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
 
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *objects,
-				   struct intel_ring_buffer *ring)
+				   struct intel_ring_buffer *ring,
+				   u32 seqno)
 {
 	struct drm_i915_gem_object *obj;
 
@@ -778,7 +812,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
 		obj->base.write_domain = obj->base.pending_write_domain;
 		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
-		i915_gem_object_move_to_active(obj, ring);
+		i915_gem_object_move_to_active(obj, ring, seqno);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			obj->pending_gpu_write = true;
@@ -833,6 +867,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_clip_rect *cliprects = NULL;
 	struct intel_ring_buffer *ring;
 	u32 exec_start, exec_len;
+	u32 seqno;
 	int ret, i;
 
 	if (!i915_gem_check_execbuffer(args)) {
@@ -851,21 +886,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	switch (args->flags & I915_EXEC_RING_MASK) {
 	case I915_EXEC_DEFAULT:
 	case I915_EXEC_RENDER:
-		ring = &dev_priv->render_ring;
+		ring = &dev_priv->ring[RCS];
 		break;
 	case I915_EXEC_BSD:
 		if (!HAS_BSD(dev)) {
 			DRM_ERROR("execbuf with invalid ring (BSD)\n");
 			return -EINVAL;
 		}
-		ring = &dev_priv->bsd_ring;
+		ring = &dev_priv->ring[VCS];
 		break;
 	case I915_EXEC_BLT:
 		if (!HAS_BLT(dev)) {
 			DRM_ERROR("execbuf with invalid ring (BLT)\n");
 			return -EINVAL;
 		}
-		ring = &dev_priv->blt_ring;
+		ring = &dev_priv->ring[BCS];
 		break;
 	default:
 		DRM_ERROR("execbuf with unknown ring: %d\n",
@@ -879,7 +914,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	if (args->num_cliprects != 0) {
-		if (ring != &dev_priv->render_ring) {
+		if (ring != &dev_priv->ring[RCS]) {
 			DRM_ERROR("clip rectangles are only valid with the render ring\n");
 			return -EINVAL;
 		}
@@ -972,6 +1007,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		goto err;
 
+	seqno = i915_gem_next_request_seqno(dev, ring);
+	for (i = 0; i < I915_NUM_RINGS-1; i++) {
+		if (seqno < ring->sync_seqno[i]) {
+			/* The GPU can not handle its semaphore value wrapping,
+			 * so every billion or so execbuffers, we need to stall
+			 * the GPU in order to reset the counters.
+			 */
+			ret = i915_gpu_idle(dev);
+			if (ret)
+				goto err;
+
+			BUG_ON(ring->sync_seqno[i]);
+		}
+	}
+
 	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
 	exec_len = args->batch_len;
 	if (cliprects) {
@@ -992,7 +1042,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 	}
 
-	i915_gem_execbuffer_move_to_active(&objects, ring);
+	i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
 	i915_gem_execbuffer_retire_commands(dev, file, ring);
 
err:
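Aside (not in this excerpt): i915_gem_execbuffer_sync_rings() above indexes from->sync_seqno[] through intel_ring_sync_index(), whose definition is not shown here. A sketch consistent with the "(id + 2 - i) % 3" arithmetic in update_semaphore() at the end of the patch -- treat it as an assumption, not the literal source:

	static inline u32
	intel_ring_sync_index(struct intel_ring_buffer *ring,
			      struct intel_ring_buffer *other)
	{
		int idx;

		/*
		 * cs -> 0 = vcs, 1 = bcs
		 * vcs -> 0 = bcs, 1 = cs,
		 * bcs -> 0 = cs, 1 = vcs.
		 */
		idx = (other - ring) - 1;
		if (idx < 0)
			idx += I915_NUM_RINGS;

		return idx;
	}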
drivers/gpu/drm/i915/i915_irq.c
@@ -67,9 +67,9 @@
 void
 ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
-		dev_priv->gt_irq_mask_reg &= ~mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+	if ((dev_priv->gt_irq_mask & mask) != 0) {
+		dev_priv->gt_irq_mask &= ~mask;
+		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
 }
@@ -77,9 +77,9 @@ ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 void
 ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
-		dev_priv->gt_irq_mask_reg |= mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+	if ((dev_priv->gt_irq_mask & mask) != mask) {
+		dev_priv->gt_irq_mask |= mask;
+		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
 }
@@ -88,9 +88,9 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 static void
 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	if ((dev_priv->irq_mask_reg & mask) != 0) {
-		dev_priv->irq_mask_reg &= ~mask;
-		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
+	if ((dev_priv->irq_mask & mask) != 0) {
+		dev_priv->irq_mask &= ~mask;
+		I915_WRITE(DEIMR, dev_priv->irq_mask);
 		POSTING_READ(DEIMR);
 	}
 }
@@ -98,9 +98,9 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 static inline void
 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	if ((dev_priv->irq_mask_reg & mask) != mask) {
-		dev_priv->irq_mask_reg |= mask;
-		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
+	if ((dev_priv->irq_mask & mask) != mask) {
+		dev_priv->irq_mask |= mask;
+		I915_WRITE(DEIMR, dev_priv->irq_mask);
 		POSTING_READ(DEIMR);
 	}
 }
@@ -108,9 +108,9 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 void
 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	if ((dev_priv->irq_mask_reg & mask) != 0) {
-		dev_priv->irq_mask_reg &= ~mask;
-		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+	if ((dev_priv->irq_mask & mask) != 0) {
+		dev_priv->irq_mask &= ~mask;
+		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
 	}
 }
@@ -118,9 +118,9 @@ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
 void
 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	if ((dev_priv->irq_mask_reg & mask) != mask) {
-		dev_priv->irq_mask_reg |= mask;
-		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+	if ((dev_priv->irq_mask & mask) != mask) {
+		dev_priv->irq_mask |= mask;
+		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
 	}
 }
@@ -163,9 +163,12 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 /**
  * intel_enable_asle - enable ASLE interrupt for OpRegion
  */
-void intel_enable_asle (struct drm_device *dev)
+void intel_enable_asle(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
 	if (HAS_PCH_SPLIT(dev))
 		ironlake_enable_display_irq(dev_priv, DE_GSE);
@@ -176,6 +179,8 @@ void intel_enable_asle (struct drm_device *dev)
 		i915_enable_pipestat(dev_priv, 0,
 				     PIPE_LEGACY_BLC_EVENT_ENABLE);
 	}
+
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 /**
@@ -344,12 +349,12 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 			READ_BREADCRUMB(dev_priv);
 	}
 
-	if (gt_iir & GT_PIPE_NOTIFY)
-		notify_ring(dev, &dev_priv->render_ring);
+	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+		notify_ring(dev, &dev_priv->ring[RCS]);
 	if (gt_iir & bsd_usr_interrupt)
-		notify_ring(dev, &dev_priv->bsd_ring);
-	if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
-		notify_ring(dev, &dev_priv->blt_ring);
+		notify_ring(dev, &dev_priv->ring[VCS]);
+	if (gt_iir & GT_BLT_USER_INTERRUPT)
+		notify_ring(dev, &dev_priv->ring[BCS]);
 
 	if (de_iir & DE_GSE)
 		intel_opregion_gse_intr(dev);
@@ -640,8 +645,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 
 	DRM_DEBUG_DRIVER("generating error event\n");
 
-	error->seqno =
-		dev_priv->render_ring.get_seqno(&dev_priv->render_ring);
+	error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
 	error->pipeastat = I915_READ(PIPEASTAT);
@@ -656,16 +660,16 @@ static void i915_capture_error_state(struct drm_device *dev)
 		error->bcs_ipeir = I915_READ(BCS_IPEIR);
 		error->bcs_instdone = I915_READ(BCS_INSTDONE);
 		error->bcs_seqno = 0;
-		if (dev_priv->blt_ring.get_seqno)
-			error->bcs_seqno = dev_priv->blt_ring.get_seqno(&dev_priv->blt_ring);
+		if (dev_priv->ring[BCS].get_seqno)
+			error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
 
 		error->vcs_acthd = I915_READ(VCS_ACTHD);
 		error->vcs_ipehr = I915_READ(VCS_IPEHR);
 		error->vcs_ipeir = I915_READ(VCS_IPEIR);
 		error->vcs_instdone = I915_READ(VCS_INSTDONE);
 		error->vcs_seqno = 0;
-		if (dev_priv->bsd_ring.get_seqno)
-			error->vcs_seqno = dev_priv->bsd_ring.get_seqno(&dev_priv->bsd_ring);
+		if (dev_priv->ring[VCS].get_seqno)
+			error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
 	}
 	if (INTEL_INFO(dev)->gen >= 4) {
 		error->ipeir = I915_READ(IPEIR_I965);
@@ -684,7 +688,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 	}
 	i915_gem_record_fences(dev, error);
 
-	bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->render_ring);
+	bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->ring[RCS]);
 
 	/* Grab the current batchbuffer, most likely to have crashed. */
 	batchbuffer[0] = NULL;
@@ -748,7 +752,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 
 	/* Record the ringbuffer */
 	error->ringbuffer = i915_error_object_create(dev,
-						     dev_priv->render_ring.obj);
+						     dev_priv->ring[RCS].obj);
 
 	/* Record buffers on the active and pinned lists. */
 	error->active_bo = NULL;
@@ -949,11 +953,11 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
 		/*
 		 * Wakeup waiting processes so they don't hang
 		 */
-		wake_up_all(&dev_priv->render_ring.irq_queue);
+		wake_up_all(&dev_priv->ring[RCS].irq_queue);
 		if (HAS_BSD(dev))
-			wake_up_all(&dev_priv->bsd_ring.irq_queue);
+			wake_up_all(&dev_priv->ring[VCS].irq_queue);
 		if (HAS_BLT(dev))
-			wake_up_all(&dev_priv->blt_ring.irq_queue);
+			wake_up_all(&dev_priv->ring[BCS].irq_queue);
 	}
 
 	queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -1035,7 +1039,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		 * It doesn't set the bit in iir again, but it still produces
 		 * interrupts (for non-MSI).
 		 */
-		spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 		pipea_stats = I915_READ(PIPEASTAT);
 		pipeb_stats = I915_READ(PIPEBSTAT);
 
@@ -1058,7 +1062,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 			I915_WRITE(PIPEBSTAT, pipeb_stats);
 			irq_received = 1;
 		}
-		spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 		if (!irq_received)
 			break;
@@ -1091,9 +1095,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		}
 
 		if (iir & I915_USER_INTERRUPT)
-			notify_ring(dev, &dev_priv->render_ring);
-		if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
-			notify_ring(dev, &dev_priv->bsd_ring);
+			notify_ring(dev, &dev_priv->ring[RCS]);
+		if (iir & I915_BSD_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->ring[VCS]);
 
 		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
 			intel_prepare_page_flip(dev, 0);
@@ -1180,10 +1184,10 @@ static int i915_emit_irq(struct drm_device * dev)
 void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	if (dev_priv->trace_irq_seqno == 0)
-		render_ring->user_irq_get(render_ring);
+		ring->irq_get(ring);
 
 	dev_priv->trace_irq_seqno = seqno;
 }
@@ -1193,7 +1197,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	int ret = 0;
-	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
 		  READ_BREADCRUMB(dev_priv));
@@ -1207,10 +1211,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
-	render_ring->user_irq_get(render_ring);
-	DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
+	ring->irq_get(ring);
+	DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
 		    READ_BREADCRUMB(dev_priv) >= irq_nr);
-	render_ring->user_irq_put(render_ring);
+	ring->irq_put(ring);
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1229,7 +1233,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
 	drm_i915_irq_emit_t *emit = data;
 	int result;
 
-	if (!dev_priv || !dev_priv->render_ring.virtual_start) {
+	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
@@ -1275,9 +1279,9 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
 	if (!i915_pipe_enabled(dev, pipe))
 		return -EINVAL;
 
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	if (HAS_PCH_SPLIT(dev))
-		ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
+		ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
 					    DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
 	else if (INTEL_INFO(dev)->gen >= 4)
 		i915_enable_pipestat(dev_priv, pipe,
@@ -1285,7 +1289,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
 	else
 		i915_enable_pipestat(dev_priv, pipe,
 				     PIPE_VBLANK_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	return 0;
 }
 
@@ -1297,15 +1301,15 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	if (HAS_PCH_SPLIT(dev))
-		ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
+		ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
 					     DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
 	else
 		i915_disable_pipestat(dev_priv, pipe,
 				      PIPE_VBLANK_INTERRUPT_ENABLE |
 				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 void i915_enable_interrupt (struct drm_device *dev)
@@ -1397,6 +1401,27 @@ static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
 	return false;
 }
 
+static bool kick_ring(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp = I915_READ_CTL(ring);
+	if (tmp & RING_WAIT) {
+		DRM_ERROR("Kicking stuck wait on %s\n",
+			  ring->name);
+		I915_WRITE_CTL(ring, tmp);
+		return true;
+	}
+	if (IS_GEN6(dev) &&
+	    (tmp & RING_WAIT_SEMAPHORE)) {
+		DRM_ERROR("Kicking stuck semaphore on %s\n",
+			  ring->name);
+		I915_WRITE_CTL(ring, tmp);
+		return true;
+	}
+	return false;
+}
+
 /**
  * This is called when the chip hasn't reported back with completed
  * batchbuffers in a long time. The first time this is called we simply record
@@ -1411,9 +1436,9 @@ void i915_hangcheck_elapsed(unsigned long data)
 	bool err = false;
 
 	/* If all work is done then ACTHD clearly hasn't advanced. */
-	if (i915_hangcheck_ring_idle(&dev_priv->render_ring, &err) &&
-	    i915_hangcheck_ring_idle(&dev_priv->bsd_ring, &err) &&
-	    i915_hangcheck_ring_idle(&dev_priv->blt_ring, &err)) {
+	if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
+	    i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
+	    i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
 		dev_priv->hangcheck_count = 0;
 		if (err)
 			goto repeat;
@@ -1442,12 +1467,17 @@ void i915_hangcheck_elapsed(unsigned long data)
 		 * and break the hang. This should work on
 		 * all but the second generation chipsets.
 		 */
-		struct intel_ring_buffer *ring = &dev_priv->render_ring;
-		u32 tmp = I915_READ_CTL(ring);
-		if (tmp & RING_WAIT) {
-			I915_WRITE_CTL(ring, tmp);
+		if (kick_ring(&dev_priv->ring[RCS]))
 			goto repeat;
-		}
+
+		if (HAS_BSD(dev) &&
+		    kick_ring(&dev_priv->ring[VCS]))
+			goto repeat;
+
+		if (HAS_BLT(dev) &&
+		    kick_ring(&dev_priv->ring[BCS]))
+			goto repeat;
 	}
 
 	i915_handle_error(dev, true);
@@ -1498,37 +1528,37 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	/* enable kind of interrupts always enabled */
 	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
 			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
-	u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
+	u32 render_irqs;
 	u32 hotplug_mask;
 
-	dev_priv->irq_mask_reg = ~display_mask;
-	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
+	dev_priv->irq_mask = ~display_mask;
 
 	/* should always can generate irq */
 	I915_WRITE(DEIIR, I915_READ(DEIIR));
-	I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
-	I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
+	I915_WRITE(DEIMR, dev_priv->irq_mask);
+	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
 	POSTING_READ(DEIER);
 
-	if (IS_GEN6(dev)) {
-		render_mask =
-			GT_PIPE_NOTIFY |
-			GT_GEN6_BSD_USER_INTERRUPT |
-			GT_BLT_USER_INTERRUPT;
-	}
-
-	dev_priv->gt_irq_mask_reg = ~render_mask;
-	dev_priv->gt_irq_enable_reg = render_mask;
+	dev_priv->gt_irq_mask = ~0;
 
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
-	I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 	if (IS_GEN6(dev)) {
-		I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
-		I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
+		I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_USER_INTERRUPT);
+		I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_USER_INTERRUPT);
 		I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
 	}
 
-	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
+	if (IS_GEN6(dev))
+		render_irqs =
+			GT_USER_INTERRUPT |
+			GT_GEN6_BSD_USER_INTERRUPT |
+			GT_BLT_USER_INTERRUPT;
+	else
+		render_irqs =
+			GT_PIPE_NOTIFY |
+			GT_BSD_USER_INTERRUPT;
+	I915_WRITE(GTIER, render_irqs);
 	POSTING_READ(GTIER);
 
 	if (HAS_PCH_CPT(dev)) {
@@ -1539,12 +1569,11 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 	}
 
-	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
-	dev_priv->pch_irq_enable_reg = hotplug_mask;
+	dev_priv->pch_irq_mask = ~hotplug_mask;
 
 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
-	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
-	I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
+	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
+	I915_WRITE(SDEIER, hotplug_mask);
 	POSTING_READ(SDEIER);
 
 	if (IS_IRONLAKE_M(dev)) {
@@ -1594,11 +1623,11 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
 	u32 error_mask;
 
-	DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
+	DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
 	if (HAS_BSD(dev))
-		DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
+		DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
 	if (HAS_BLT(dev))
-		DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);
+		DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
 
 	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 
@@ -1606,7 +1635,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 		return ironlake_irq_postinstall(dev);
 
 	/* Unmask the interrupts that we always want on. */
-	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
+	dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
 
 	dev_priv->pipestat[0] = 0;
 	dev_priv->pipestat[1] = 0;
@@ -1615,7 +1644,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 		/* Enable in IER... */
 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
 		/* and unmask in IMR */
-		dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
+		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
 	}
 
 	/*
@@ -1633,7 +1662,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 	}
 	I915_WRITE(EMR, error_mask);
 
-	I915_WRITE(IMR, dev_priv->irq_mask_reg);
+	I915_WRITE(IMR, dev_priv->irq_mask);
 	I915_WRITE(IER, enable_mask);
 	POSTING_READ(IER);
 
drivers/gpu/drm/i915/i915_reg.h
@@ -176,6 +176,11 @@
 #define   MI_BATCH_NON_SECURE		(1)
 #define   MI_BATCH_NON_SECURE_I965	(1<<8)
 #define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
+#define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6+ */
+#define  MI_SEMAPHORE_GLOBAL_GTT	(1<<22)
+#define  MI_SEMAPHORE_UPDATE		(1<<21)
+#define  MI_SEMAPHORE_COMPARE		(1<<20)
+#define  MI_SEMAPHORE_REGISTER		(1<<18)
 /*
  * 3D instructions used by the kernel
  */
@@ -276,9 +281,12 @@
 #define RING_HEAD(base)		((base)+0x34)
 #define RING_START(base)	((base)+0x38)
 #define RING_CTL(base)		((base)+0x3c)
+#define RING_SYNC_0(base)	((base)+0x40)
+#define RING_SYNC_1(base)	((base)+0x44)
 #define RING_HWS_PGA(base)	((base)+0x80)
 #define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
 #define RING_ACTHD(base)	((base)+0x74)
+#define RING_NOPID(base)	((base)+0x94)
 #define TAIL_ADDR		0x001FFFF8
 #define HEAD_WRAP_COUNT		0xFFE00000
 #define HEAD_WRAP_ONE		0x00200000
@@ -293,6 +301,7 @@
 #define   RING_INVALID		0x00000000
 #define   RING_WAIT_I8XX	(1<<0) /* gen2, PRBx_HEAD */
 #define   RING_WAIT		(1<<11) /* gen3+, PRBx_CTL */
+#define   RING_WAIT_SEMAPHORE	(1<<10) /* gen6+ */
 #if 0
 #define PRB0_TAIL	0x02030
 #define PRB0_HEAD	0x02034
@@ -347,6 +356,14 @@
 # define VS_TIMER_DISPATCH				(1 << 6)
 # define MI_FLUSH_ENABLE				(1 << 11)
 
+#define GFX_MODE	0x02520
+#define   GFX_RUN_LIST_ENABLE		(1<<15)
+#define   GFX_TLB_INVALIDATE_ALWAYS	(1<<13)
+#define   GFX_SURFACE_FAULT_ENABLE	(1<<12)
+#define   GFX_REPLAY_MODE		(1<<11)
+#define   GFX_PSMI_GRANULARITY		(1<<10)
+#define   GFX_PPGTT_ENABLE		(1<<9)
+
 #define SCPD0		0x0209c /* 915+ only */
 #define IER		0x020a0
 #define IIR		0x020a4
@@ -498,7 +515,7 @@
 #define   GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR	(1 << 3)
 
 #define GEN6_BSD_IMR			0x120a8
-#define   GEN6_BSD_IMR_USER_INTERRUPT	(1 << 12)
+#define   GEN6_BSD_USER_INTERRUPT	(1 << 12)
 
 #define GEN6_BSD_RNCID			0x12198
 
drivers/gpu/drm/i915/intel_display.c
@@ -1998,7 +1998,7 @@ static void intel_clear_scanline_wait(struct drm_device *dev)
 		/* Can't break the hang on i8xx */
 		return;
 
-	ring = &dev_priv->render_ring;
+	ring = LP_RING(dev_priv);
 	tmp = I915_READ_CTL(ring);
 	if (tmp & RING_WAIT)
 		I915_WRITE_CTL(ring, tmp);
@@ -5124,7 +5124,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	obj = intel_fb->obj;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = intel_pin_and_fence_fb_obj(dev, obj, &dev_priv->render_ring);
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
 	if (ret)
 		goto cleanup_work;
 
drivers/gpu/drm/i915/intel_opregion.c
@@ -273,14 +273,8 @@ void intel_opregion_enable_asle(struct drm_device *dev)
 	struct opregion_asle *asle = dev_priv->opregion.asle;
 
 	if (asle) {
-		if (IS_MOBILE(dev)) {
-			unsigned long irqflags;
-
-			spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+		if (IS_MOBILE(dev))
 			intel_enable_asle(dev);
-			spin_unlock_irqrestore(&dev_priv->user_irq_lock,
-					       irqflags);
-		}
 
 		asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
 			     ASLE_PFMB_EN;
drivers/gpu/drm/i915/intel_overlay.c
@@ -221,7 +221,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 	int ret;
 
 	BUG_ON(overlay->last_flip_req);
-	ret = i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+	ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
 	if (ret) {
 		kfree(request);
 		return ret;
@@ -230,7 +230,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 	overlay->flip_tail = tail;
 	ret = i915_do_wait_request(dev,
 				   overlay->last_flip_req, true,
-				   &dev_priv->render_ring);
+				   LP_RING(dev_priv));
 	if (ret)
 		return ret;
 
@@ -364,7 +364,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	OUT_RING(flip_addr);
 	ADVANCE_LP_RING();
 
-	ret = i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+	ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
 	if (ret) {
 		kfree(request);
 		return ret;
@@ -454,7 +454,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 		return 0;
 
 	ret = i915_do_wait_request(dev, overlay->last_flip_req,
-				   interruptible, &dev_priv->render_ring);
+				   interruptible, LP_RING(dev_priv));
 	if (ret)
 		return ret;
 
@ -203,6 +203,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
if (ring->space < 0)
ring->space += ring->size;
}

return 0;
}

@ -281,17 +282,18 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
static int init_render_ring(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = init_ring_common(ring);

if (INTEL_INFO(dev)->gen > 3) {
drm_i915_private_t *dev_priv = dev->dev_private;
int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
if (IS_GEN6(dev))
mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
I915_WRITE(MI_MODE, mode);
}

if (HAS_PIPE_CONTROL(dev)) {
if (INTEL_INFO(dev)->gen >= 6) {
} else if (HAS_PIPE_CONTROL(dev)) {
ret = init_pipe_control(ring);
if (ret)
return ret;

@ -308,6 +310,80 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
cleanup_pipe_control(ring);
}

static void
update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int id;

/*
 * cs -> 1 = vcs, 0 = bcs
 * vcs -> 1 = bcs, 0 = cs,
 * bcs -> 1 = cs, 0 = vcs.
 */
id = ring - dev_priv->ring;
id += 2 - i;
id %= 3;

intel_ring_emit(ring,
MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_REGISTER |
MI_SEMAPHORE_UPDATE);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring,
RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
}
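The modular arithmetic is easiest to check with a worked case; a sketch assuming RCS/VCS/BCS = 0/1/2, as in the enum added to intel_ringbuffer.h below:

/* Worked example for the render ring (ring - dev_priv->ring == RCS == 0):
 *   i = 0: id = (0 + 2 - 0) % 3 = 2 = BCS, register = BCS's RING_SYNC_0
 *   i = 1: id = (0 + 2 - 1) % 3 = 1 = VCS, register = VCS's RING_SYNC_1
 * which matches the "cs -> 1 = vcs, 0 = bcs" comment above. */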

static int
gen6_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
u32 seqno;
int ret;

ret = intel_ring_begin(ring, 10);
if (ret)
return ret;

seqno = i915_gem_get_seqno(ring->dev);
update_semaphore(ring, 0, seqno);
update_semaphore(ring, 1, seqno);

intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);

*result = seqno;
return 0;
}
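The intel_ring_begin(ring, 10) reservation is exact: each update_semaphore() call emits three dwords and the request tail adds four more.

/* Dword budget for gen6_add_request():
 *   2 * update_semaphore()                  6 dwords
 *   MI_STORE_DWORD_INDEX + index + seqno    3 dwords
 *   MI_USER_INTERRUPT                       1 dword
 *                                          10 dwords total */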

int
intel_ring_sync(struct intel_ring_buffer *ring,
struct intel_ring_buffer *to,
u32 seqno)
{
int ret;

ret = intel_ring_begin(ring, 4);
if (ret)
return ret;

intel_ring_emit(ring,
MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_REGISTER |
intel_ring_sync_index(ring, to) << 17 |
MI_SEMAPHORE_COMPARE);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);

return 0;
}
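For the caller's side, a hypothetical use (argument order per the prototype: first the ring that executes the wait, then the ring whose seqno is awaited; the actual call site is not part of this hunk):

/* Hypothetical sketch: stall the blitter until the render ring
 * has passed seqno. Assumes struct_mutex is already held. */
struct intel_ring_buffer *waiter = &dev_priv->ring[BCS];
struct intel_ring_buffer *signaller = &dev_priv->ring[RCS];
ret = intel_ring_sync(waiter, signaller, seqno);
if (ret)
	return ret;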

#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
@ -317,131 +393,128 @@ do { \
intel_ring_emit(ring__, 0); \
} while (0)

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static int
pc_render_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
struct drm_device *dev = ring->dev;
u32 seqno = i915_gem_get_seqno(dev);
struct pipe_control *pc = ring->private;
u32 scratch_addr = pc->gtt_offset + 128;
int ret;

/*
 * Workaround qword write incoherence by flushing the
 * PIPE_NOTIFY buffers out to memory before requesting
 * an interrupt.
 */
ret = intel_ring_begin(ring, 32);
if (ret)
return ret;

intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128; /* write to separate cachelines */
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);

*result = seqno;
return 0;
}
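The six flushes march scratch_addr forward in 128-byte steps so each store lands in its own cacheline; an equivalent loop form, shown only to make the stride explicit (one redundant final increment aside):

/* Equivalent sketch of the flush ladder above. */
int i;
for (i = 0; i < 6; i++) {
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* next cacheline */
}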

static int
render_ring_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
struct drm_device *dev = ring->dev;
u32 seqno = i915_gem_get_seqno(dev);
struct pipe_control *pc = ring->private;
int ret;

if (IS_GEN6(dev)) {
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;

intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | 3);
intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
} else if (HAS_PIPE_CONTROL(dev)) {
u32 scratch_addr = pc->gtt_offset + 128;

/*
 * Workaround qword write incoherence by flushing the
 * PIPE_NOTIFY buffers out to memory before requesting
 * an interrupt.
 */
ret = intel_ring_begin(ring, 32);
if (ret)
return ret;

intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128; /* write to separate cachelines */
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, 0);
} else {
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;

intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, seqno);

intel_ring_emit(ring, MI_USER_INTERRUPT);
}
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;

intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);

*result = seqno;
return 0;
}

static u32
render_ring_get_seqno(struct intel_ring_buffer *ring)
ring_get_seqno(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
if (HAS_PIPE_CONTROL(dev)) {
struct pipe_control *pc = ring->private;
return pc->cpu_page[0];
} else
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
struct pipe_control *pc = ring->private;
return pc->cpu_page[0];
}

static void
render_ring_get_user_irq(struct intel_ring_buffer *ring)
render_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;

spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
if (dev->irq_enabled && ++ring->irq_refcount == 1) {
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long irqflags;

spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

if (HAS_PCH_SPLIT(dev))
ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
ironlake_enable_graphics_irq(dev_priv,
GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
else
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);

spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void
render_ring_put_user_irq(struct intel_ring_buffer *ring)
render_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;

spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
BUG_ON(dev->irq_enabled && ring->irq_refcount == 0);
if (dev->irq_enabled && --ring->irq_refcount == 0) {
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long irqflags;

spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
ironlake_disable_graphics_irq(dev_priv,
GT_USER_INTERRUPT |
GT_PIPE_NOTIFY);
else
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@ -459,6 +532,9 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
return;

if (intel_ring_begin(ring, 2) == 0) {
intel_ring_emit(ring, MI_FLUSH);
intel_ring_emit(ring, MI_NOOP);
@ -491,20 +567,45 @@ ring_add_request(struct intel_ring_buffer *ring,
}

static void
bsd_ring_get_user_irq(struct intel_ring_buffer *ring)
ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
{
/* do nothing */
}
static void
bsd_ring_put_user_irq(struct intel_ring_buffer *ring)
{
/* do nothing */
struct drm_device *dev = ring->dev;

if (dev->irq_enabled && ++ring->irq_refcount == 1) {
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long irqflags;

spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_graphics_irq(dev_priv, flag);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
}

static u32
ring_status_page_get_seqno(struct intel_ring_buffer *ring)
static void
ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
{
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
struct drm_device *dev = ring->dev;

if (dev->irq_enabled && --ring->irq_refcount == 0) {
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long irqflags;

spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_disable_graphics_irq(dev_priv, flag);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
}
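ring_get_irq()/ring_put_irq() centralise the refcounting so the GT interrupt bit is only toggled on the 0->1 and 1->0 refcount edges. A hypothetical waiter showing the intended pairing (wait-loop details elided; irq_queue and i915_seqno_passed() are assumed from the surrounding driver):

/* Hypothetical: keep the user interrupt enabled only while sleeping. */
ring->irq_get(ring);
ret = wait_event_interruptible(ring->irq_queue,
			       i915_seqno_passed(ring->get_seqno(ring), seqno));
ring->irq_put(ring);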

static void
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
}
static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
}

static int
@ -817,9 +918,9 @@ static const struct intel_ring_buffer render_ring = {
.write_tail = ring_write_tail,
.flush = render_ring_flush,
.add_request = render_ring_add_request,
.get_seqno = render_ring_get_seqno,
.user_irq_get = render_ring_get_user_irq,
.user_irq_put = render_ring_put_user_irq,
.get_seqno = ring_get_seqno,
.irq_get = render_ring_get_irq,
.irq_put = render_ring_put_irq,
.dispatch_execbuffer = render_ring_dispatch_execbuffer,
.cleanup = render_ring_cleanup,
};
@ -835,9 +936,9 @@ static const struct intel_ring_buffer bsd_ring = {
.write_tail = ring_write_tail,
.flush = bsd_ring_flush,
.add_request = ring_add_request,
.get_seqno = ring_status_page_get_seqno,
.user_irq_get = bsd_ring_get_user_irq,
.user_irq_put = bsd_ring_put_user_irq,
.get_seqno = ring_get_seqno,
.irq_get = bsd_ring_get_irq,
.irq_put = bsd_ring_put_irq,
.dispatch_execbuffer = ring_dispatch_execbuffer,
};

@ -868,6 +969,9 @@ static void gen6_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
return;

if (intel_ring_begin(ring, 4) == 0) {
intel_ring_emit(ring, MI_FLUSH_DW);
intel_ring_emit(ring, 0);
@ -895,33 +999,46 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
return 0;
}

static void
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
.name = "gen6 bsd ring",
.id = RING_BSD,
.mmio_base = GEN6_BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
.init = init_ring_common,
.write_tail = gen6_bsd_ring_write_tail,
.flush = gen6_ring_flush,
.add_request = ring_add_request,
.get_seqno = ring_status_page_get_seqno,
.user_irq_get = bsd_ring_get_user_irq,
.user_irq_put = bsd_ring_put_user_irq,
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
.name = "gen6 bsd ring",
.id = RING_BSD,
.mmio_base = GEN6_BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
.init = init_ring_common,
.write_tail = gen6_bsd_ring_write_tail,
.flush = gen6_ring_flush,
.add_request = gen6_add_request,
.get_seqno = ring_get_seqno,
.irq_get = gen6_bsd_ring_get_irq,
.irq_put = gen6_bsd_ring_put_irq,
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
};

/* Blitter support (SandyBridge+) */

static void
blt_ring_get_user_irq(struct intel_ring_buffer *ring)
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
/* do nothing */
ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
}

static void
blt_ring_put_user_irq(struct intel_ring_buffer *ring)
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
/* do nothing */
ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
}

@ -994,6 +1111,9 @@ static void blt_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
return;

if (blt_ring_begin(ring, 4) == 0) {
intel_ring_emit(ring, MI_FLUSH_DW);
intel_ring_emit(ring, 0);
@ -1003,30 +1123,6 @@ static void blt_ring_flush(struct intel_ring_buffer *ring,
}
}

static int
blt_ring_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
u32 seqno;
int ret;

ret = blt_ring_begin(ring, 4);
if (ret)
return ret;

seqno = i915_gem_get_seqno(ring->dev);

intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);

DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
*result = seqno;
return 0;
}

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
if (!ring->private)
@ -1045,10 +1141,10 @@ static const struct intel_ring_buffer gen6_blt_ring = {
.init = blt_ring_init,
.write_tail = ring_write_tail,
.flush = blt_ring_flush,
.add_request = blt_ring_add_request,
.get_seqno = ring_status_page_get_seqno,
.user_irq_get = blt_ring_get_user_irq,
.user_irq_put = blt_ring_put_user_irq,
.add_request = gen6_add_request,
.get_seqno = ring_get_seqno,
.irq_get = blt_ring_get_irq,
.irq_put = blt_ring_put_irq,
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
.cleanup = blt_ring_cleanup,
};

@ -1056,36 +1152,43 @@ static const struct intel_ring_buffer gen6_blt_ring = {
int intel_init_render_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

dev_priv->render_ring = render_ring;

if (!I915_NEED_GFX_HWS(dev)) {
dev_priv->render_ring.status_page.page_addr
= dev_priv->status_page_dmah->vaddr;
memset(dev_priv->render_ring.status_page.page_addr,
0, PAGE_SIZE);
*ring = render_ring;
if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
} else if (HAS_PIPE_CONTROL(dev)) {
ring->add_request = pc_render_add_request;
ring->get_seqno = pc_render_get_seqno;
}

return intel_init_ring_buffer(dev, &dev_priv->render_ring);
if (!I915_NEED_GFX_HWS(dev)) {
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
}

return intel_init_ring_buffer(dev, ring);
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

if (IS_GEN6(dev))
dev_priv->bsd_ring = gen6_bsd_ring;
*ring = gen6_bsd_ring;
else
dev_priv->bsd_ring = bsd_ring;
*ring = bsd_ring;

return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

dev_priv->blt_ring = gen6_blt_ring;
*ring = gen6_blt_ring;

return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
return intel_init_ring_buffer(dev, ring);
}
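With all three rings living in one array, a driver-load sketch of how these initialisers are presumably called (the HAS_BSD/HAS_BLT gating is assumed from context; the call site is not part of this hunk):

/* Hypothetical init order under the new array layout. */
ret = intel_init_render_ring_buffer(dev);	/* dev_priv->ring[RCS] */
if (ret == 0 && HAS_BSD(dev))
	ret = intel_init_bsd_ring_buffer(dev);	/* dev_priv->ring[VCS] */
if (ret == 0 && HAS_BLT(dev))
	ret = intel_init_blt_ring_buffer(dev);	/* dev_priv->ring[BCS] */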

@ -1,6 +1,13 @@
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

enum {
RCS = 0x0,
VCS,
BCS,
I915_NUM_RINGS,
};

struct intel_hw_status_page {
u32 __iomem *page_addr;
unsigned int gfx_addr;
@ -21,7 +28,10 @@ struct intel_hw_status_page {
#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)

struct drm_i915_gem_execbuffer2;
#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base))
#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base))
#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base))

struct intel_ring_buffer {
const char *name;
enum intel_ring_id {
@ -42,9 +52,10 @@ struct intel_ring_buffer {

u32 irq_seqno; /* last seq seen at irq time */
u32 waiting_seqno;
int user_irq_refcount;
void (*user_irq_get)(struct intel_ring_buffer *ring);
void (*user_irq_put)(struct intel_ring_buffer *ring);
u32 sync_seqno[I915_NUM_RINGS-1];
u32 irq_refcount;
void (*irq_get)(struct intel_ring_buffer *ring);
void (*irq_put)(struct intel_ring_buffer *ring);

int (*init)(struct intel_ring_buffer *ring);

@ -98,6 +109,25 @@ struct intel_ring_buffer {
void *private;
};

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
struct intel_ring_buffer *other)
{
int idx;

/*
 * cs -> 0 = vcs, 1 = bcs
 * vcs -> 0 = bcs, 1 = cs,
 * bcs -> 0 = cs, 1 = vcs.
 */

idx = (other - ring) - 1;
if (idx < 0)
idx += I915_NUM_RINGS;

return idx;
}
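This is the waiter-side mirror of the arithmetic in update_semaphore(): for signaller S and waiter W, (S - W - 1) mod 3 equals (S + 2 - W) mod 3, so the mailbox index the waiter selects is exactly the register the signaller wrote. A worked case under the RCS/VCS/BCS = 0/1/2 enum above:

/* Worked example: the blitter (BCS) waiting on the render ring (RCS):
 *   idx = (0 - 2) - 1 = -3, +3 -> 0
 * so BCS polls its own RING_SYNC_0 -- the same register update_semaphore()
 * targets from RCS with mbox i == 0 (id = (0 + 2 - 0) % 3 = 2 = BCS). */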

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
int reg)
@ -119,6 +149,9 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
void intel_ring_advance(struct intel_ring_buffer *ring);

u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
int intel_ring_sync(struct intel_ring_buffer *ring,
struct intel_ring_buffer *to,
u32 seqno);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);

@ -1245,10 +1245,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
int type;

/* Disable TV interrupts around load detect or we'll recurse */
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_disable_pipestat(dev_priv, 0,
PIPE_HOTPLUG_INTERRUPT_ENABLE |
PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

save_tv_dac = tv_dac = I915_READ(TV_DAC);
save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@ -1301,10 +1302,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
I915_WRITE(TV_CTL, save_tv_ctl);

/* Restore interrupt config */
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, 0,
PIPE_HOTPLUG_INTERRUPT_ENABLE |
PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

return type;
}