drm/i915: Replace struct_mutex for batch pool serialisation

Switch to tracking activity via i915_active on individual nodes, only
keeping a list of retired objects in the cache, and reaping the cache
when the engine itself idles.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190804124826.30272-2-chris@chris-wilson.co.uk
Chris Wilson 2019-08-04 13:48:26 +01:00
parent a4e57f9031
commit b40d73784f
16 changed files with 289 additions and 265 deletions
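
The API that replaces the batch pool is deliberately small: callers take a
node from the per-engine pool, tie it to the request that uses it, and drop
their local reference. A minimal sketch of that lifecycle, assuming an awake
engine and an already-built request rq (the helpers are the ones added by
this patch; the surrounding request construction is elided):

	struct intel_engine_pool_node *node;
	int err;

	/* Find or create a buffer of at least size bytes in the engine pool. */
	node = intel_engine_pool_get(&engine->pool, size);
	if (IS_ERR(node))
		return PTR_ERR(node);

	/* ... write the batch into node->obj and construct rq ... */

	/* Keep the node (and its pinned pages) alive until rq retires. */
	err = intel_engine_pool_mark_active(node, rq);

	/* Drop the acquire taken by intel_engine_pool_get(). */
	intel_engine_pool_put(node);
	return err;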

drivers/gpu/drm/i915/Makefile

@@ -72,6 +72,7 @@ obj-y += gt/
gt-y += \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
gt/intel_engine_pool.o \
gt/intel_engine_cs.o \
gt/intel_engine_pm.o \
gt/intel_gt.o \
@@ -125,7 +126,6 @@ i915-y += \
$(gem-y) \
i915_active.o \
i915_cmd_parser.o \
i915_gem_batch_pool.o \
i915_gem_evict.o \
i915_gem_fence_reg.o \
i915_gem_gtt.o \

drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c

@@ -16,6 +16,7 @@
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
@@ -1198,25 +1199,26 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
unsigned int len)
{
struct reloc_cache *cache = &eb->reloc_cache;
struct drm_i915_gem_object *obj;
struct intel_engine_pool_node *pool;
struct i915_request *rq;
struct i915_vma *batch;
u32 *cmd;
int err;
obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE);
if (IS_ERR(pool))
return PTR_ERR(pool);
cmd = i915_gem_object_pin_map(obj,
cmd = i915_gem_object_pin_map(pool->obj,
cache->has_llc ?
I915_MAP_FORCE_WB :
I915_MAP_FORCE_WC);
i915_gem_object_unpin_pages(obj);
if (IS_ERR(cmd))
return PTR_ERR(cmd);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out_pool;
}
batch = i915_vma_instance(obj, vma->vm, NULL);
batch = i915_vma_instance(pool->obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto err_unmap;
@@ -1232,6 +1234,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_unpin;
}
err = intel_engine_pool_mark_active(pool, rq);
if (err)
goto err_request;
err = reloc_move_to_gpu(rq, vma);
if (err)
goto err_request;
@@ -1257,7 +1263,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
cache->rq_size = 0;
/* Return with batch mapping (cmd) still pinned */
return 0;
goto out_pool;
skip_request:
i915_request_skip(rq, err);
@@ -1266,7 +1272,9 @@ err_request:
err_unpin:
i915_vma_unpin(batch);
err_unmap:
i915_gem_object_unpin_map(obj);
i915_gem_object_unpin_map(pool->obj);
out_pool:
intel_engine_pool_put(pool);
return err;
}
@@ -2010,18 +2018,17 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
struct drm_i915_gem_object *shadow_batch_obj;
struct intel_engine_pool_node *pool;
struct i915_vma *vma;
int err;
shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
PAGE_ALIGN(eb->batch_len));
if (IS_ERR(shadow_batch_obj))
return ERR_CAST(shadow_batch_obj);
pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
if (IS_ERR(pool))
return ERR_CAST(pool);
err = intel_engine_cmd_parser(eb->engine,
eb->batch->obj,
shadow_batch_obj,
pool->obj,
eb->batch_start_offset,
eb->batch_len,
is_master);
@@ -2030,12 +2037,12 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
vma = NULL;
else
vma = ERR_PTR(err);
goto out;
goto err;
}
vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
if (IS_ERR(vma))
goto out;
goto err;
eb->vma[eb->buffer_count] = i915_vma_get(vma);
eb->flags[eb->buffer_count] =
@@ -2043,8 +2050,11 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
vma->exec_flags = &eb->flags[eb->buffer_count];
eb->buffer_count++;
out:
i915_gem_object_unpin_pages(shadow_batch_obj);
vma->private = pool;
return vma;
err:
intel_engine_pool_put(pool);
return vma;
}
@@ -2588,6 +2598,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* to explicitly hold another reference here.
*/
eb.request->batch = eb.batch;
if (eb.batch->private)
intel_engine_pool_mark_active(eb.batch->private, eb.request);
trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb);
@@ -2612,6 +2624,8 @@ err_request:
err_batch_unpin:
if (eb.batch_flags & I915_DISPATCH_SECURE)
i915_vma_unpin(eb.batch);
if (eb.batch->private)
intel_engine_pool_put(eb.batch->private);
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
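
Taken together, the execbuffer hunks above thread the pool node through the
shadow batch vma, so the submit and unwind paths can find it without extra
plumbing. Condensed (a paraphrase of the changes above, not additional patch
code):

	/* eb_parse(): the shadow batch borrows pool->obj; park the node. */
	vma->private = pool;

	/* i915_gem_do_execbuffer(): hold the node until eb.request retires. */
	if (eb.batch->private)
		intel_engine_pool_mark_active(eb.batch->private, eb.request);

	/* Unwind: drop execbuf's own acquire of the node. */
	if (eb.batch->private)
		intel_engine_pool_put(eb.batch->private);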

drivers/gpu/drm/i915/gem/i915_gem_object.c

@@ -66,7 +66,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->mm.link);
INIT_LIST_HEAD(&obj->lut_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
init_rcu_head(&obj->rcu);

drivers/gpu/drm/i915/gem/i915_gem_object_types.h

@@ -114,7 +114,6 @@ struct drm_i915_gem_object {
unsigned int userfault_count;
struct list_head userfault_link;
struct list_head batch_pool_link;
I915_SELFTEST_DECLARE(struct list_head st_link);
/*

drivers/gpu/drm/i915/gem/i915_gem_pm.c

@@ -34,10 +34,8 @@ static void i915_gem_park(struct drm_i915_private *i915)
lockdep_assert_held(&i915->drm.struct_mutex);
for_each_engine(engine, i915, id) {
for_each_engine(engine, i915, id)
call_idle_barriers(engine); /* cleanup after wedging */
i915_gem_batch_pool_fini(&engine->batch_pool);
}
intel_timelines_park(i915);
i915_vma_parked(i915);

drivers/gpu/drm/i915/gt/intel_engine.h

@@ -9,7 +9,6 @@
#include <linux/random.h>
#include <linux/seqlock.h>
#include "i915_gem_batch_pool.h"
#include "i915_pmu.h"
#include "i915_reg.h"
#include "i915_request.h"

drivers/gpu/drm/i915/gt/intel_engine_cs.c

@@ -32,6 +32,7 @@
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
#include "intel_context.h"
#include "intel_lrc.h"
#include "intel_reset.h"
@@ -492,11 +493,6 @@ cleanup:
return err;
}
static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
{
i915_gem_batch_pool_init(&engine->batch_pool, engine);
}
void intel_engine_init_execlists(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -622,10 +618,11 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
intel_engine_init_hangcheck(engine);
intel_engine_init_batch_pool(engine);
intel_engine_init_cmd_parser(engine);
intel_engine_init__pm(engine);
intel_engine_pool_init(&engine->pool);
/* Use the whole device by default */
engine->sseu =
intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
@@ -869,9 +866,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
cleanup_status_page(engine);
intel_engine_pool_fini(&engine->pool);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
if (engine->default_state)
i915_gem_object_put(engine->default_state);

drivers/gpu/drm/i915/gt/intel_engine_pm.c

@@ -8,6 +8,7 @@
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
@ -116,6 +117,7 @@ static int __engine_park(struct intel_wakeref *wf)
GEM_TRACE("%s\n", engine->name);
intel_engine_disarm_breadcrumbs(engine);
intel_engine_pool_park(&engine->pool);
/* Must be reset upon idling, or we may miss the busy wakeup. */
GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);

drivers/gpu/drm/i915/gt/intel_engine_pool.c (new file)

@@ -0,0 +1,177 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2014-2018 Intel Corporation
*/
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool)
{
return container_of(pool, struct intel_engine_cs, pool);
}
static struct list_head *
bucket_for_size(struct intel_engine_pool *pool, size_t sz)
{
int n;
/*
* Compute a power-of-two bucket, but throw everything greater than
* 16KiB into the same bucket: i.e. the buckets hold objects of
* (1 page, 2 pages, 4 pages, 8+ pages).
*/
n = fls(sz >> PAGE_SHIFT) - 1;
if (n >= ARRAY_SIZE(pool->cache_list))
n = ARRAY_SIZE(pool->cache_list) - 1;
return &pool->cache_list[n];
}
static void node_free(struct intel_engine_pool_node *node)
{
i915_gem_object_put(node->obj);
i915_active_fini(&node->active);
kfree(node);
}
static int pool_active(struct i915_active *ref)
{
struct intel_engine_pool_node *node =
container_of(ref, typeof(*node), active);
struct reservation_object *resv = node->obj->base.resv;
int err;
if (reservation_object_trylock(resv)) {
reservation_object_add_excl_fence(resv, NULL);
reservation_object_unlock(resv);
}
err = i915_gem_object_pin_pages(node->obj);
if (err)
return err;
/* Hide this pinned object from the shrinker until retired */
i915_gem_object_make_unshrinkable(node->obj);
return 0;
}
static void pool_retire(struct i915_active *ref)
{
struct intel_engine_pool_node *node =
container_of(ref, typeof(*node), active);
struct intel_engine_pool *pool = node->pool;
struct list_head *list = bucket_for_size(pool, node->obj->base.size);
unsigned long flags;
GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
i915_gem_object_unpin_pages(node->obj);
/* Return this object to the shrinker pool */
i915_gem_object_make_purgeable(node->obj);
spin_lock_irqsave(&pool->lock, flags);
list_add(&node->link, list);
spin_unlock_irqrestore(&pool->lock, flags);
}
static struct intel_engine_pool_node *
node_create(struct intel_engine_pool *pool, size_t sz)
{
struct intel_engine_cs *engine = to_engine(pool);
struct intel_engine_pool_node *node;
struct drm_i915_gem_object *obj;
node = kmalloc(sizeof(*node),
GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!node)
return ERR_PTR(-ENOMEM);
node->pool = pool;
i915_active_init(engine->i915, &node->active, pool_active, pool_retire);
obj = i915_gem_object_create_internal(engine->i915, sz);
if (IS_ERR(obj)) {
i915_active_fini(&node->active);
kfree(node);
return ERR_CAST(obj);
}
node->obj = obj;
return node;
}
struct intel_engine_pool_node *
intel_engine_pool_get(struct intel_engine_pool *pool, size_t size)
{
struct intel_engine_pool_node *node;
struct list_head *list;
unsigned long flags;
int ret;
GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
size = PAGE_ALIGN(size);
list = bucket_for_size(pool, size);
spin_lock_irqsave(&pool->lock, flags);
list_for_each_entry(node, list, link) {
if (node->obj->base.size < size)
continue;
list_del(&node->link);
break;
}
spin_unlock_irqrestore(&pool->lock, flags);
if (&node->link == list) {
node = node_create(pool, size);
if (IS_ERR(node))
return node;
}
ret = i915_active_acquire(&node->active);
if (ret) {
node_free(node);
return ERR_PTR(ret);
}
return node;
}
void intel_engine_pool_init(struct intel_engine_pool *pool)
{
int n;
spin_lock_init(&pool->lock);
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
INIT_LIST_HEAD(&pool->cache_list[n]);
}
void intel_engine_pool_park(struct intel_engine_pool *pool)
{
int n;
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
struct list_head *list = &pool->cache_list[n];
struct intel_engine_pool_node *node, *nn;
list_for_each_entry_safe(node, nn, list, link)
node_free(node);
INIT_LIST_HEAD(list);
}
}
void intel_engine_pool_fini(struct intel_engine_pool *pool)
{
int n;
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}
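
For reference, a standalone sketch of the bucket arithmetic in
bucket_for_size() above, assuming 4KiB pages (PAGE_SHIFT == 12); fls_() is a
stand-in for the kernel's fls():

	#include <stddef.h>

	static int fls_(unsigned long x) /* find last (most significant) set bit */
	{
		int n = 0;

		while (x) {
			n++;
			x >>= 1;
		}
		return n;
	}

	static int bucket(size_t sz) /* mirrors bucket_for_size() */
	{
		int n = fls_(sz >> 12) - 1;

		return n >= 4 ? 3 : n; /* clamp to ARRAY_SIZE(cache_list) - 1 */
	}

	/*
	 * bucket(4096)  == 0: one page
	 * bucket(8192)  == 1: two or three pages
	 * bucket(16384) == 2: four to seven pages
	 * bucket(32768) == 3: eight pages and up share the last bucket
	 */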

drivers/gpu/drm/i915/gt/intel_engine_pool.h (new file)

@@ -0,0 +1,34 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2014-2018 Intel Corporation
*/
#ifndef INTEL_ENGINE_POOL_H
#define INTEL_ENGINE_POOL_H
#include "intel_engine_pool_types.h"
#include "i915_active.h"
#include "i915_request.h"
struct intel_engine_pool_node *
intel_engine_pool_get(struct intel_engine_pool *pool, size_t size);
static inline int
intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
struct i915_request *rq)
{
return i915_active_ref(&node->active, rq->fence.context, rq);
}
static inline void
intel_engine_pool_put(struct intel_engine_pool_node *node)
{
i915_active_release(&node->active);
}
void intel_engine_pool_init(struct intel_engine_pool *pool);
void intel_engine_pool_park(struct intel_engine_pool *pool);
void intel_engine_pool_fini(struct intel_engine_pool *pool);
#endif /* INTEL_ENGINE_POOL_H */
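
A short summary of the reference flow these helpers set up, as read from the
implementation (the callbacks are the pool_active()/pool_retire() pair in
intel_engine_pool.c above):

	/*
	 * intel_engine_pool_get()         -> i915_active_acquire(): the first
	 *   acquire runs pool_active(), pinning the backing pages and hiding
	 *   the object from the shrinker.
	 * intel_engine_pool_mark_active() -> i915_active_ref(): ties the node
	 *   to rq, keeping it live until the request retires.
	 * intel_engine_pool_put()         -> i915_active_release(): drops the
	 *   local acquire; once the last tracked request retires, pool_retire()
	 *   unpins the pages, marks the object purgeable and returns it to its
	 *   size bucket.
	 */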

drivers/gpu/drm/i915/gt/intel_engine_pool_types.h (new file)

@@ -0,0 +1,29 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2014-2018 Intel Corporation
*/
#ifndef INTEL_ENGINE_POOL_TYPES_H
#define INTEL_ENGINE_POOL_TYPES_H
#include <linux/list.h>
#include <linux/spinlock.h>
#include "i915_active_types.h"
struct drm_i915_gem_object;
struct intel_engine_pool {
spinlock_t lock;
struct list_head cache_list[4];
};
struct intel_engine_pool_node {
struct i915_active active;
struct drm_i915_gem_object *obj;
struct list_head link;
struct intel_engine_pool *pool;
};
#endif /* INTEL_ENGINE_POOL_TYPES_H */

drivers/gpu/drm/i915/gt/intel_engine_types.h

@@ -16,12 +16,12 @@
#include <linux/types.h>
#include "i915_gem.h"
#include "i915_gem_batch_pool.h"
#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
#include "gt/intel_timeline_types.h"
#include "intel_engine_pool_types.h"
#include "intel_sseu.h"
#include "intel_timeline_types.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"
@@ -354,7 +354,7 @@ struct intel_engine_cs {
* when the command parser is enabled. Prevents the client from
* modifying the batch contents after software parsing.
*/
struct i915_gem_batch_pool batch_pool;
struct intel_engine_pool pool;
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;

drivers/gpu/drm/i915/gt/mock_engine.c

@@ -27,6 +27,7 @@
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
#include "mock_engine.h"
#include "selftests/mock_request.h"
@@ -292,6 +293,7 @@ int mock_engine_init(struct intel_engine_cs *engine)
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
intel_engine_init__pm(engine);
intel_engine_pool_init(&engine->pool);
engine->kernel_context =
i915_gem_context_get_engine(i915->kernel_context, engine->id);

drivers/gpu/drm/i915/i915_debugfs.c

@@ -295,27 +295,6 @@ static int per_file_stats(int id, void *ptr, void *data)
stats.closed); \
} while (0)
static void print_batch_pool_stats(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj;
struct intel_engine_cs *engine;
struct file_stats stats = {};
enum intel_engine_id id;
int j;
for_each_engine(engine, dev_priv, id) {
for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
list_for_each_entry(obj,
&engine->batch_pool.cache_list[j],
batch_pool_link)
per_file_stats(0, obj, &stats);
}
}
print_file_stats(m, "[k]batch pool", stats);
}
static void print_context_stats(struct seq_file *m,
struct drm_i915_private *i915)
{
@@ -374,58 +353,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
if (ret)
return ret;
print_batch_pool_stats(m, i915);
print_context_stats(m, i915);
mutex_unlock(&i915->drm.struct_mutex);
return 0;
}
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
struct drm_i915_gem_object *obj;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int total = 0;
int ret, j;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
for_each_engine(engine, dev_priv, id) {
for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
int count;
count = 0;
list_for_each_entry(obj,
&engine->batch_pool.cache_list[j],
batch_pool_link)
count++;
seq_printf(m, "%s cache[%d]: %d objects\n",
engine->name, j, count);
list_for_each_entry(obj,
&engine->batch_pool.cache_list[j],
batch_pool_link) {
seq_puts(m, " ");
describe_obj(m, obj);
seq_putc(m, '\n');
}
total += count;
}
}
seq_printf(m, "total: %d\n", total);
mutex_unlock(&dev->struct_mutex);
return 0;
}
static void gen8_display_interrupt_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -4385,7 +4318,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
{"i915_guc_info", i915_guc_info, 0},
{"i915_guc_load_status", i915_guc_load_status_info, 0},
{"i915_guc_log_dump", i915_guc_log_dump, 0},

drivers/gpu/drm/i915/i915_gem_batch_pool.c (deleted)

@@ -1,132 +0,0 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2014-2018 Intel Corporation
*/
#include "i915_gem_batch_pool.h"
#include "i915_drv.h"
/**
* DOC: batch pool
*
* In order to submit batch buffers as 'secure', the software command parser
* must ensure that a batch buffer cannot be modified after parsing. It does
* this by copying the user provided batch buffer contents to a kernel owned
* buffer from which the hardware will actually execute, and by carefully
* managing the address space bindings for such buffers.
*
* The batch pool framework provides a mechanism for the driver to manage a
* set of scratch buffers to use for this purpose. The framework can be
* extended to support other use cases should they arise.
*/
/**
* i915_gem_batch_pool_init() - initialize a batch buffer pool
* @pool: the batch buffer pool
* @engine: the associated request submission engine
*/
void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
struct intel_engine_cs *engine)
{
int n;
pool->engine = engine;
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
INIT_LIST_HEAD(&pool->cache_list[n]);
}
/**
* i915_gem_batch_pool_fini() - clean up a batch buffer pool
* @pool: the pool to clean up
*
* Note: Callers must hold the struct_mutex.
*/
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
int n;
lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
struct drm_i915_gem_object *obj, *next;
list_for_each_entry_safe(obj, next,
&pool->cache_list[n],
batch_pool_link)
i915_gem_object_put(obj);
INIT_LIST_HEAD(&pool->cache_list[n]);
}
}
/**
* i915_gem_batch_pool_get() - allocate a buffer from the pool
* @pool: the batch buffer pool
* @size: the minimum desired size of the returned buffer
*
* Returns an inactive buffer from @pool with at least @size bytes,
* with the pages pinned. The caller must i915_gem_object_unpin_pages()
* on the returned object.
*
* Note: Callers must hold the struct_mutex
*
* Return: the buffer object or an error pointer
*/
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
size_t size)
{
struct drm_i915_gem_object *obj;
struct list_head *list;
int n, ret;
lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
/* Compute a power-of-two bucket, but throw everything greater than
* 16KiB into the same bucket: i.e. the buckets hold objects of
* (1 page, 2 pages, 4 pages, 8+ pages).
*/
n = fls(size >> PAGE_SHIFT) - 1;
if (n >= ARRAY_SIZE(pool->cache_list))
n = ARRAY_SIZE(pool->cache_list) - 1;
list = &pool->cache_list[n];
list_for_each_entry(obj, list, batch_pool_link) {
struct reservation_object *resv = obj->base.resv;
/* The batches are strictly LRU ordered */
if (!reservation_object_test_signaled_rcu(resv, true))
break;
/*
* The object is now idle, clear the array of shared
* fences before we add a new request. Although, we
* remain on the same engine, we may be on a different
* timeline and so may continually grow the array,
* trapping a reference to all the old fences, rather
* than replace the existing fence.
*/
if (rcu_access_pointer(resv->fence)) {
reservation_object_lock(resv, NULL);
reservation_object_add_excl_fence(resv, NULL);
reservation_object_unlock(resv);
}
if (obj->base.size >= size)
goto found;
}
obj = i915_gem_object_create_internal(pool->engine->i915, size);
if (IS_ERR(obj))
return obj;
found:
ret = i915_gem_object_pin_pages(obj);
if (ret)
return ERR_PTR(ret);
list_move_tail(&obj->batch_pool_link, list);
return obj;
}

drivers/gpu/drm/i915/i915_gem_batch_pool.h (deleted)

@@ -1,26 +0,0 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2014-2018 Intel Corporation
*/
#ifndef I915_GEM_BATCH_POOL_H
#define I915_GEM_BATCH_POOL_H
#include <linux/types.h>
struct drm_i915_gem_object;
struct intel_engine_cs;
struct i915_gem_batch_pool {
struct intel_engine_cs *engine;
struct list_head cache_list[4];
};
void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
struct intel_engine_cs *engine);
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
#endif /* I915_GEM_BATCH_POOL_H */