drm/i915: Refactor common ringbuffer allocation code
A small, very small, step to sharing the duplicate code between execlists and legacy submission engines, starting with the ringbuffer allocation code.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Arun Siluvery <arun.siluvery@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Dave Gordon <david.s.gordon@intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit 01101fa7cc
parent 742f491d2c
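In short: both submission paths give up their private kzalloc-and-initialise sequences and call one shared allocate/free pair, with allocation failures reported through ERR_PTR() instead of NULL. The fragment below is a condensed illustration of that calling pattern, not code from the patch; example_engine_ring_alloc() is a hypothetical wrapper written for this page, while every helper it calls is declared in the intel_ringbuffer.h hunk at the end of this diff.

/* Hypothetical caller, condensed from the hunks below: allocate, then pin.
 * Allocation failures come back as ERR_PTR() values rather than NULL, so
 * the caller tests with IS_ERR()/PTR_ERR() instead of !ringbuf.
 */
static int example_engine_ring_alloc(struct drm_device *dev,
				     struct intel_engine_cs *engine)
{
	struct intel_ringbuffer *ringbuf;
	int ret;

	ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
	if (IS_ERR(ringbuf))
		return PTR_ERR(ringbuf);

	/* Pinning and mapping remains a separate, caller-controlled step. */
	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		intel_ringbuffer_free(ringbuf);	/* single teardown helper */
		return ret;
	}

	engine->buffer = ringbuf;
	return 0;
}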
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2340,8 +2340,7 @@ void intel_lr_context_free(struct intel_context *ctx)
 				i915_gem_object_ggtt_unpin(ctx_obj);
 			}
 			WARN_ON(ctx->engine[ring->id].pin_count);
-			intel_destroy_ringbuffer_obj(ringbuf);
-			kfree(ringbuf);
+			intel_ringbuffer_free(ringbuf);
 			drm_gem_object_unreference(&ctx_obj->base);
 		}
 	}
@@ -2442,42 +2441,20 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
 	}
 
-	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
-	if (!ringbuf) {
-		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
-				ring->name);
-		ret = -ENOMEM;
+	ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE);
+	if (IS_ERR(ringbuf)) {
+		ret = PTR_ERR(ringbuf);
 		goto error_unpin_ctx;
 	}
 
-	ringbuf->ring = ring;
-
-	ringbuf->size = 4 * PAGE_SIZE;
-	ringbuf->effective_size = ringbuf->size;
-	ringbuf->head = 0;
-	ringbuf->tail = 0;
-	ringbuf->last_retired_head = -1;
-	intel_ring_update_space(ringbuf);
-
-	if (ringbuf->obj == NULL) {
-		ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-		if (ret) {
-			DRM_DEBUG_DRIVER(
-			"Failed to allocate ringbuffer obj %s: %d\n",
-			ring->name, ret);
-			goto error_free_rbuf;
-		}
-
-		if (is_global_default_ctx) {
-			ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
-			if (ret) {
-				DRM_ERROR(
-					"Failed to pin and map ringbuffer %s: %d\n",
-					ring->name, ret);
-				goto error_destroy_rbuf;
-			}
+	if (is_global_default_ctx) {
+		ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+		if (ret) {
+			DRM_ERROR(
+				"Failed to pin and map ringbuffer %s: %d\n",
+				ring->name, ret);
+			goto error_ringbuf;
 		}
-
 	}
 
 	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
@@ -2519,10 +2496,8 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 error:
 	if (is_global_default_ctx)
 		intel_unpin_ringbuffer_obj(ringbuf);
-error_destroy_rbuf:
-	intel_destroy_ringbuffer_obj(ringbuf);
-error_free_rbuf:
-	kfree(ringbuf);
+error_ringbuf:
+	intel_ringbuffer_free(ringbuf);
 error_unpin_ctx:
 	if (is_global_default_ctx)
 		i915_gem_object_ggtt_unpin(ctx_obj);
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1996,14 +1996,14 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 	return 0;
 }
 
-void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
 	drm_gem_object_unreference(&ringbuf->obj->base);
 	ringbuf->obj = NULL;
 }
 
-int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-			       struct intel_ringbuffer *ringbuf)
+static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+				      struct intel_ringbuffer *ringbuf)
 {
 	struct drm_i915_gem_object *obj;
 
@@ -2023,6 +2023,48 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 	return 0;
 }
 
+struct intel_ringbuffer *
+intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
+{
+	struct intel_ringbuffer *ring;
+	int ret;
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+	if (ring == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ring->ring = engine;
+
+	ring->size = size;
+	/* Workaround an erratum on the i830 which causes a hang if
+	 * the TAIL pointer points to within the last 2 cachelines
+	 * of the buffer.
+	 */
+	ring->effective_size = size;
+	if (IS_I830(engine->dev) || IS_845G(engine->dev))
+		ring->effective_size -= 2 * CACHELINE_BYTES;
+
+	ring->last_retired_head = -1;
+	intel_ring_update_space(ring);
+
+	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
+	if (ret) {
+		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+			  engine->name, ret);
+		kfree(ring);
+		return ERR_PTR(ret);
+	}
+
+	return ring;
+}
+
+void
+intel_ringbuffer_free(struct intel_ringbuffer *ring)
+{
+	intel_destroy_ringbuffer_obj(ring);
+	kfree(ring);
+}
+
 static int intel_init_ring_buffer(struct drm_device *dev,
 				  struct intel_engine_cs *ring)
 {
@@ -2031,22 +2073,20 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
 	WARN_ON(ring->buffer);
 
-	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
-	if (!ringbuf)
-		return -ENOMEM;
-	ring->buffer = ringbuf;
-
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->execlist_queue);
 	i915_gem_batch_pool_init(dev, &ring->batch_pool);
-	ringbuf->size = 32 * PAGE_SIZE;
-	ringbuf->ring = ring;
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
 	init_waitqueue_head(&ring->irq_queue);
 
+	ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
+	if (IS_ERR(ringbuf))
+		return PTR_ERR(ringbuf);
+	ring->buffer = ringbuf;
+
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(ring);
 		if (ret)
@@ -2058,15 +2098,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 			goto error;
 	}
 
-	WARN_ON(ringbuf->obj);
-
-	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-	if (ret) {
-		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
-				ring->name, ret);
-		goto error;
-	}
-
 	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
 	if (ret) {
 		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
@@ -2075,14 +2106,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		goto error;
 	}
 
-	/* Workaround an erratum on the i830 which causes a hang if
-	 * the TAIL pointer points to within the last 2 cachelines
-	 * of the buffer.
-	 */
-	ringbuf->effective_size = ringbuf->size;
-	if (IS_I830(dev) || IS_845G(dev))
-		ringbuf->effective_size -= 2 * CACHELINE_BYTES;
-
 	ret = i915_cmd_parser_init_ring(ring);
 	if (ret)
 		goto error;
@@ -2090,7 +2113,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	return 0;
 
 error:
-	kfree(ringbuf);
+	intel_ringbuffer_free(ringbuf);
 	ring->buffer = NULL;
 	return ret;
 }
@@ -2098,19 +2121,18 @@ error:
 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv;
-	struct intel_ringbuffer *ringbuf;
 
 	if (!intel_ring_initialized(ring))
 		return;
 
 	dev_priv = to_i915(ring->dev);
-	ringbuf = ring->buffer;
 
 	intel_stop_ring_buffer(ring);
 	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
-	intel_unpin_ringbuffer_obj(ringbuf);
-	intel_destroy_ringbuffer_obj(ringbuf);
+	intel_unpin_ringbuffer_obj(ring->buffer);
+	intel_ringbuffer_free(ring->buffer);
+	ring->buffer = NULL;
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
@@ -2119,9 +2141,6 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 
 	i915_cmd_parser_fini_ring(ring);
 	i915_gem_batch_pool_fini(&ring->batch_pool);
-
-	kfree(ringbuf);
-	ring->buffer = NULL;
 }
 
 static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -420,12 +420,12 @@ intel_write_status_page(struct intel_engine_cs *ring,
 #define I915_GEM_HWS_SCRATCH_INDEX	0x40
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+struct intel_ringbuffer *
+intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 				     struct intel_ringbuffer *ringbuf);
-void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
-int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-			       struct intel_ringbuffer *ringbuf);
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+void intel_ringbuffer_free(struct intel_ringbuffer *ring);
 
 void intel_stop_ring_buffer(struct intel_engine_cs *ring);
 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);