drm/i915: Split the ringbuffers from the rings (2/3)
This refactoring has been performed using the following Coccinelle
semantic script:

@@
struct intel_engine_cs r;
@@
(
- (r).obj
+ r.buffer->obj
|
- (r).virtual_start
+ r.buffer->virtual_start
|
- (r).head
+ r.buffer->head
|
- (r).tail
+ r.buffer->tail
|
- (r).space
+ r.buffer->space
|
- (r).size
+ r.buffer->size
|
- (r).effective_size
+ r.buffer->effective_size
|
- (r).last_retired_head
+ r.buffer->last_retired_head
)

@@
struct intel_engine_cs *r;
@@
(
- (r)->obj
+ r->buffer->obj
|
- (r)->virtual_start
+ r->buffer->virtual_start
|
- (r)->head
+ r->buffer->head
|
- (r)->tail
+ r->buffer->tail
|
- (r)->space
+ r->buffer->space
|
- (r)->size
+ r->buffer->size
|
- (r)->effective_size
+ r->buffer->effective_size
|
- (r)->last_retired_head
+ r->buffer->last_retired_head
)

@@
expression E;
@@
(
- LP_RING(E)->obj
+ LP_RING(E)->buffer->obj
|
- LP_RING(E)->virtual_start
+ LP_RING(E)->buffer->virtual_start
|
- LP_RING(E)->head
+ LP_RING(E)->buffer->head
|
- LP_RING(E)->tail
+ LP_RING(E)->buffer->tail
|
- LP_RING(E)->space
+ LP_RING(E)->buffer->space
|
- LP_RING(E)->size
+ LP_RING(E)->buffer->size
|
- LP_RING(E)->effective_size
+ LP_RING(E)->buffer->effective_size
|
- LP_RING(E)->last_retired_head
+ LP_RING(E)->buffer->last_retired_head
)

Note: On top of this, the patch also removes the now unused ringbuffer
fields in intel_engine_cs.

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
[danvet: Add note about fixup patch included here.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
This commit is contained in:
parent 8ee149756e
commit ee1b1e5ef3
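
The change itself is mechanical: every ringbuffer field that previously
lived directly in struct intel_engine_cs is now reached through the
engine's buffer pointer. A minimal before/after sketch (simplified,
illustrative struct definitions rather than the real kernel headers):

	typedef unsigned int u32;	/* stand-in for the kernel type */

	/* Before: ringbuffer state embedded in the engine. */
	struct engine_before {
		u32 head;
		u32 tail;
		int space;
		int size;
	};

	/* After: the same state lives in a separate ringbuffer object,
	 * reached via ring->buffer. */
	struct ringbuffer_after {
		u32 head;
		u32 tail;
		int space;
		int size;
	};

	struct engine_after {
		struct ringbuffer_after *buffer;
	};

	/* The Coccinelle rules rewrite every access accordingly, e.g.: */
	static void advance(struct engine_after *ring)
	{
		/* was: ring->tail &= ring->size - 1; */
		ring->buffer->tail &= ring->buffer->size - 1;
	}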
drivers/gpu/drm/i915/i915_dma.c
@@ -64,7 +64,7 @@
  * has access to the ring.
  */
 #define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
-	if (LP_RING(dev->dev_private)->obj == NULL)			\
+	if (LP_RING(dev->dev_private)->buffer->obj == NULL)		\
 		LOCK_TEST_WITH_RETURN(dev, file);			\
 } while (0)
 
@@ -149,17 +149,17 @@ void i915_kernel_lost_context(struct drm_device * dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
-	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
-	if (ring->space < 0)
-		ring->space += ring->size;
+	ring->buffer->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+	ring->buffer->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+	ring->buffer->space = ring->buffer->head - (ring->buffer->tail + I915_RING_FREE_SPACE);
+	if (ring->buffer->space < 0)
+		ring->buffer->space += ring->buffer->size;
 
 	if (!dev->primary->master)
 		return;
 
 	master_priv = dev->primary->master->driver_priv;
-	if (ring->head == ring->tail && master_priv->sarea_priv)
+	if (ring->buffer->head == ring->buffer->tail && master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
 }
 
@@ -202,7 +202,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	}
 
 	if (init->ring_size != 0) {
-		if (LP_RING(dev_priv)->obj != NULL) {
+		if (LP_RING(dev_priv)->buffer->obj != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
@@ -239,7 +239,7 @@ static int i915_dma_resume(struct drm_device * dev)
 
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 
-	if (ring->virtual_start == NULL) {
+	if (ring->buffer->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
@@ -361,7 +361,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i, ret;
 
-	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
+	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
 		return -EINVAL;
 
 	for (i = 0; i < dwords;) {
@@ -824,7 +824,7 @@ static int i915_irq_emit(struct drm_device *dev, void *data,
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
-	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
+	if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
drivers/gpu/drm/i915/i915_gem.c
@@ -2523,7 +2523,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 	 * of tail of the request to update the last known position
 	 * of the GPU head.
 	 */
-	ring->last_retired_head = request->tail;
+	ring->buffer->last_retired_head = request->tail;
 
 	i915_gem_free_request(request);
 }
drivers/gpu/drm/i915/i915_gpu_error.c
@@ -825,8 +825,8 @@ static void i915_record_ring_state(struct drm_device *dev,
 		ering->hws = I915_READ(mmio);
 	}
 
-	ering->cpu_ring_head = ring->head;
-	ering->cpu_ring_tail = ring->tail;
+	ering->cpu_ring_head = ring->buffer->head;
+	ering->cpu_ring_tail = ring->buffer->tail;
 
 	ering->hangcheck_score = ring->hangcheck.score;
 	ering->hangcheck_action = ring->hangcheck.action;
@@ -930,7 +930,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
 		}
 
 		error->ring[i].ringbuffer =
-			i915_error_ggtt_object_create(dev_priv, ring->obj);
+			i915_error_ggtt_object_create(dev_priv, ring->buffer->obj);
 
 		if (ring->status_page.obj)
 			error->ring[i].hws_page =
drivers/gpu/drm/i915/i915_irq.c
@@ -1219,7 +1219,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 static void notify_ring(struct drm_device *dev,
 			struct intel_engine_cs *ring)
 {
-	if (ring->obj == NULL)
+	if (ring->buffer->obj == NULL)
 		return;
 
 	trace_i915_gem_request_complete(ring);
@@ -2837,10 +2837,10 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 	 * our ring is smaller than what the hardware (and hence
 	 * HEAD_ADDR) allows. Also handles wrap-around.
 	 */
-	head &= ring->size - 1;
+	head &= ring->buffer->size - 1;
 
 	/* This here seems to blow up */
-	cmd = ioread32(ring->virtual_start + head);
+	cmd = ioread32(ring->buffer->virtual_start + head);
 	if (cmd == ipehr)
 		break;
 
@@ -2850,7 +2850,7 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 	if (!i)
 		return NULL;
 
-	*seqno = ioread32(ring->virtual_start + head + 4) + 1;
+	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
 	return semaphore_wait_to_signaller_ring(ring, ipehr);
 }
 
drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -50,7 +50,7 @@ static inline int __ring_space(int head, int tail, int size)
 
 static inline int ring_space(struct intel_engine_cs *ring)
 {
-	return __ring_space(ring->head & HEAD_ADDR, ring->tail, ring->size);
+	return __ring_space(ring->buffer->head & HEAD_ADDR, ring->buffer->tail, ring->buffer->size);
 }
 
 static bool intel_ring_stopped(struct intel_engine_cs *ring)
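
ring_space() delegates to __ring_space(), whose body is not part of this
diff; the same free-space arithmetic is open-coded in
i915_kernel_lost_context() above, so the helper plausibly reduces to the
sketch below. The I915_RING_FREE_SPACE margin is assumed here, not shown
in this patch:

	#define I915_RING_FREE_SPACE 64	/* assumed safety margin */

	/* Sketch of the circular-buffer free-space calculation used
	 * throughout this file: head minus tail minus the margin,
	 * wrapped by the ring size when the result goes negative. */
	static inline int ring_space_sketch(int head, int tail, int size)
	{
		int space = head - (tail + I915_RING_FREE_SPACE);

		if (space < 0)	/* head has wrapped around past tail */
			space += size;
		return space;
	}

	/* Worked example: size = 0x20000, head = 0x100, tail = 0x200:
	 * 0x100 - (0x200 + 64) = -320; -320 + 0x20000 = 130752 bytes free. */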
@@ -61,10 +61,10 @@ static bool intel_ring_stopped(struct intel_engine_cs *ring)
 
 void __intel_ring_advance(struct intel_engine_cs *ring)
 {
-	ring->tail &= ring->size - 1;
+	ring->buffer->tail &= ring->buffer->size - 1;
 	if (intel_ring_stopped(ring))
 		return;
-	ring->write_tail(ring, ring->tail);
+	ring->write_tail(ring, ring->buffer->tail);
 }
 
 static int
@@ -481,7 +481,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj = ring->obj;
+	struct drm_i915_gem_object *obj = ring->buffer->obj;
 	int ret = 0;
 
 	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
@@ -520,7 +520,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
 	 * register values. */
 	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
 	I915_WRITE_CTL(ring,
-			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+			((ring->buffer->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
@@ -540,10 +540,10 @@ static int init_ring_common(struct intel_engine_cs *ring)
 	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
 		i915_kernel_lost_context(ring->dev);
 	else {
-		ring->head = I915_READ_HEAD(ring);
-		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ring->space = ring_space(ring);
-		ring->last_retired_head = -1;
+		ring->buffer->head = I915_READ_HEAD(ring);
+		ring->buffer->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+		ring->buffer->space = ring_space(ring);
+		ring->buffer->last_retired_head = -1;
 	}
 
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -1382,14 +1382,14 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring)
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (ring->obj)
+	if (ring->buffer->obj)
 		return 0;
 
 	obj = NULL;
 	if (!HAS_LLC(dev))
-		obj = i915_gem_object_create_stolen(dev, ring->size);
+		obj = i915_gem_object_create_stolen(dev, ring->buffer->size);
 	if (obj == NULL)
-		obj = i915_gem_alloc_object(dev, ring->size);
+		obj = i915_gem_alloc_object(dev, ring->buffer->size);
 	if (obj == NULL)
 		return -ENOMEM;
 
@@ -1401,15 +1401,15 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring)
 	if (ret)
 		goto err_unpin;
 
-	ring->virtual_start =
+	ring->buffer->virtual_start =
 		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
-				ring->size);
-	if (ring->virtual_start == NULL) {
+				ring->buffer->size);
+	if (ring->buffer->virtual_start == NULL) {
 		ret = -EINVAL;
 		goto err_unpin;
 	}
 
-	ring->obj = obj;
+	ring->buffer->obj = obj;
 	return 0;
 
 err_unpin:
@@ -1435,7 +1435,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	ring->size = 32 * PAGE_SIZE;
+	ring->buffer->size = 32 * PAGE_SIZE;
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
 	init_waitqueue_head(&ring->irq_queue);
@@ -1461,9 +1461,9 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	 * the TAIL pointer points to within the last 2 cachelines
 	 * of the buffer.
 	 */
-	ring->effective_size = ring->size;
+	ring->buffer->effective_size = ring->buffer->size;
 	if (IS_I830(dev) || IS_845G(dev))
-		ring->effective_size -= 2 * CACHELINE_BYTES;
+		ring->buffer->effective_size -= 2 * CACHELINE_BYTES;
 
 	ret = i915_cmd_parser_init_ring(ring);
 	if (ret)
@@ -1485,17 +1485,17 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv = to_i915(ring->dev);
 
-	if (ring->obj == NULL)
+	if (ring->buffer->obj == NULL)
 		return;
 
 	intel_stop_ring_buffer(ring);
 	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
-	iounmap(ring->virtual_start);
+	iounmap(ring->buffer->virtual_start);
 
-	i915_gem_object_ggtt_unpin(ring->obj);
-	drm_gem_object_unreference(&ring->obj->base);
-	ring->obj = NULL;
+	i915_gem_object_ggtt_unpin(ring->buffer->obj);
+	drm_gem_object_unreference(&ring->buffer->obj->base);
+	ring->buffer->obj = NULL;
 	ring->preallocated_lazy_request = NULL;
 	ring->outstanding_lazy_seqno = 0;
 
@@ -1516,17 +1516,17 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 	u32 seqno = 0;
 	int ret;
 
-	if (ring->last_retired_head != -1) {
-		ring->head = ring->last_retired_head;
-		ring->last_retired_head = -1;
+	if (ring->buffer->last_retired_head != -1) {
+		ring->buffer->head = ring->buffer->last_retired_head;
+		ring->buffer->last_retired_head = -1;
 
-		ring->space = ring_space(ring);
-		if (ring->space >= n)
+		ring->buffer->space = ring_space(ring);
+		if (ring->buffer->space >= n)
 			return 0;
 	}
 
 	list_for_each_entry(request, &ring->request_list, list) {
-		if (__ring_space(request->tail, ring->tail, ring->size) >= n) {
+		if (__ring_space(request->tail, ring->buffer->tail, ring->buffer->size) >= n) {
 			seqno = request->seqno;
 			break;
 		}
@@ -1540,10 +1540,10 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 		return ret;
 
 	i915_gem_retire_requests_ring(ring);
-	ring->head = ring->last_retired_head;
-	ring->last_retired_head = -1;
+	ring->buffer->head = ring->buffer->last_retired_head;
+	ring->buffer->last_retired_head = -1;
 
-	ring->space = ring_space(ring);
+	ring->buffer->space = ring_space(ring);
 	return 0;
 }
 
@@ -1570,9 +1570,9 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 
 	trace_i915_ring_wait_begin(ring);
 	do {
-		ring->head = I915_READ_HEAD(ring);
-		ring->space = ring_space(ring);
-		if (ring->space >= n) {
+		ring->buffer->head = I915_READ_HEAD(ring);
+		ring->buffer->space = ring_space(ring);
+		if (ring->buffer->space >= n) {
 			ret = 0;
 			break;
 		}
@@ -1608,21 +1608,21 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 {
 	uint32_t __iomem *virt;
-	int rem = ring->size - ring->tail;
+	int rem = ring->buffer->size - ring->buffer->tail;
 
-	if (ring->space < rem) {
+	if (ring->buffer->space < rem) {
 		int ret = ring_wait_for_space(ring, rem);
 		if (ret)
 			return ret;
 	}
 
-	virt = ring->virtual_start + ring->tail;
+	virt = ring->buffer->virtual_start + ring->buffer->tail;
 	rem /= 4;
 	while (rem--)
 		iowrite32(MI_NOOP, virt++);
 
-	ring->tail = 0;
-	ring->space = ring_space(ring);
+	ring->buffer->tail = 0;
+	ring->buffer->space = ring_space(ring);
 
 	return 0;
 }
@@ -1674,13 +1674,13 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring,
 {
 	int ret;
 
-	if (unlikely(ring->tail + bytes > ring->effective_size)) {
+	if (unlikely(ring->buffer->tail + bytes > ring->buffer->effective_size)) {
 		ret = intel_wrap_ring_buffer(ring);
 		if (unlikely(ret))
 			return ret;
 	}
 
-	if (unlikely(ring->space < bytes)) {
+	if (unlikely(ring->buffer->space < bytes)) {
 		ret = ring_wait_for_space(ring, bytes);
 		if (unlikely(ret))
 			return ret;
@@ -1709,14 +1709,14 @@ int intel_ring_begin(struct intel_engine_cs *ring,
 	if (ret)
 		return ret;
 
-	ring->space -= num_dwords * sizeof(uint32_t);
+	ring->buffer->space -= num_dwords * sizeof(uint32_t);
 	return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct intel_engine_cs *ring)
 {
-	int num_dwords = (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 
 	if (num_dwords == 0)
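
intel_ring_cacheline_align() first measures how far the tail sits past a
cacheline boundary; the rest of the function (not shown in this hunk)
presumably pads up to the next boundary with NOOPs. A worked example of
the line shown above, assuming CACHELINE_BYTES is 64:

	/* tail = 0x88, CACHELINE_BYTES = 64:
	 * 0x88 & (64 - 1) = 0x08 bytes into the cacheline,
	 * 0x08 / sizeof(uint32_t) = 2 dwords past the boundary,
	 * so 16 - 2 = 14 NOOP dwords would restore alignment. */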
@@ -2094,13 +2094,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 
-	ring->size = size;
-	ring->effective_size = ring->size;
+	ring->buffer->size = size;
+	ring->buffer->effective_size = ring->buffer->size;
 	if (IS_I830(ring->dev) || IS_845G(ring->dev))
-		ring->effective_size -= 2 * CACHELINE_BYTES;
+		ring->buffer->effective_size -= 2 * CACHELINE_BYTES;
 
-	ring->virtual_start = ioremap_wc(start, size);
-	if (ring->virtual_start == NULL) {
+	ring->buffer->virtual_start = ioremap_wc(start, size);
+	if (ring->buffer->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		ret = -ENOMEM;
@@ -2116,7 +2116,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	return 0;
 
 err_vstart:
-	iounmap(ring->virtual_start);
+	iounmap(ring->buffer->virtual_start);
 err_ringbuf:
 	kfree(ringbuf);
 	ring->buffer = NULL;
drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -91,28 +91,11 @@ struct intel_engine_cs {
 #define I915_NUM_RINGS 5
 #define LAST_USER_RING (VECS + 1)
 	u32		mmio_base;
-	void		__iomem *virtual_start;
 	struct		drm_device *dev;
-	struct		drm_i915_gem_object *obj;
 	struct intel_ringbuffer *buffer;
 
-	u32		head;
-	u32		tail;
-	int		space;
-	int		size;
-	int		effective_size;
 	struct intel_hw_status_page status_page;
 
-	/** We track the position of the requests in the ring buffer, and
-	 * when each is retired we increment last_retired_head as the GPU
-	 * must have finished processing the request and so we know we
-	 * can advance the ringbuffer up to that position.
-	 *
-	 * last_retired_head is set to -1 after the value is consumed so
-	 * we can detect new retirements.
-	 */
-	u32		last_retired_head;
-
 	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
 	u32		trace_irq_seqno;
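
The fields deleted here are exactly the ones that moved into struct
intel_ringbuffer in part 1/3 of the series. Reconstructed from the
ring->buffer accesses in this patch (a sketch, not the verbatim kernel
definition):

	struct intel_ringbuffer {
		struct drm_i915_gem_object *obj;  /* backing GEM object */
		void __iomem *virtual_start;      /* CPU mapping of the ring */

		u32 head;
		u32 tail;
		int space;           /* bytes free between tail and head */
		int size;
		int effective_size;  /* size minus the 2-cacheline tail reserve */

		/* Tail position of the most recently retired request; set
		 * to -1 once consumed so new retirements can be detected. */
		u32 last_retired_head;
	};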
@@ -239,7 +222,7 @@
 static inline bool
 intel_ring_initialized(struct intel_engine_cs *ring)
 {
-	return ring->buffer && ring->obj;
+	return ring->buffer && ring->buffer->obj;
 }
 
 static inline unsigned
static inline unsigned
|
static inline unsigned
|
||||||
@ -310,12 +293,12 @@ int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
|
|||||||
static inline void intel_ring_emit(struct intel_engine_cs *ring,
|
static inline void intel_ring_emit(struct intel_engine_cs *ring,
|
||||||
u32 data)
|
u32 data)
|
||||||
{
|
{
|
||||||
iowrite32(data, ring->virtual_start + ring->tail);
|
iowrite32(data, ring->buffer->virtual_start + ring->buffer->tail);
|
||||||
ring->tail += 4;
|
ring->buffer->tail += 4;
|
||||||
}
|
}
|
||||||
static inline void intel_ring_advance(struct intel_engine_cs *ring)
|
static inline void intel_ring_advance(struct intel_engine_cs *ring)
|
||||||
{
|
{
|
||||||
ring->tail &= ring->size - 1;
|
ring->buffer->tail &= ring->buffer->size - 1;
|
||||||
}
|
}
|
||||||
void __intel_ring_advance(struct intel_engine_cs *ring);
|
void __intel_ring_advance(struct intel_engine_cs *ring);
|
||||||
|
|
||||||
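
These helpers are the building blocks of the driver's emit pattern:
reserve space with intel_ring_begin(), write each dword with
intel_ring_emit(), then publish the new tail with __intel_ring_advance().
An illustrative caller (the specific commands are placeholders, not
taken from this patch):

	static int emit_two_dwords(struct intel_engine_cs *ring)
	{
		int ret;

		/* Reserve room for two dwords; may wait or wrap the ring. */
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_NOOP);	/* placeholder command */
		intel_ring_emit(ring, MI_NOOP);

		/* Mask the tail into range and write it to the hardware. */
		__intel_ring_advance(ring);
		return 0;
	}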
@@ -335,7 +318,7 @@ void intel_ring_setup_status_page(struct intel_engine_cs *ring);
 
 static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
 {
-	return ring->tail;
+	return ring->buffer->tail;
 }
 
 static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)