mirror of https://github.com/torvalds/linux.git (synced 2024-11-22)
drm/exec: Pass in initial # of objects
In cases where the # is known ahead of time, it is silly to do the
table resize dance.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Christian König <christian.koenig@amd.com>
Patchwork: https://patchwork.freedesktop.org/patch/568338/
commit 05d249352f
parent 3a48a40387
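For existing callers the conversion is mechanical: every drm_exec_init() call gains a trailing count, and passing 0 keeps the old default table size. A caller that already knows how many objects it will lock can pass that number instead. Below is a minimal sketch of such a caller using the new three-argument signature; the function and its single-BO scenario are hypothetical, not taken from this patch:

#include <drm/drm_exec.h>
#include <drm/drm_gem.h>

/* Hypothetical example: exactly one GEM object will be locked, so size
 * the objects table for a single entry instead of the one-page default.
 */
static int lock_single_bo(struct drm_gem_object *obj)
{
	struct drm_exec exec;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 1);
	drm_exec_until_all_locked(&exec) {
		ret = drm_exec_lock_obj(&exec, obj);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}
	drm_exec_fini(&exec);
	return ret;
}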
@@ -1137,7 +1137,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
 
 	ctx->n_vms = 1;
 	ctx->sync = &mem->sync;
-	drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 	drm_exec_until_all_locked(&ctx->exec) {
 		ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
 		drm_exec_retry_on_contention(&ctx->exec);
@@ -1176,7 +1176,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 	int ret;
 
 	ctx->sync = &mem->sync;
-	drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 	drm_exec_until_all_locked(&ctx->exec) {
 		ctx->n_vms = 0;
 		list_for_each_entry(entry, &mem->attachments, list) {
@@ -2552,7 +2552,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 
 	amdgpu_sync_create(&sync);
 
-	drm_exec_init(&exec, 0);
+	drm_exec_init(&exec, 0, 0);
 	/* Reserve all BOs and page tables for validation */
 	drm_exec_until_all_locked(&exec) {
 		/* Reserve all the page directories */
@@ -2793,7 +2793,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 
 	mutex_lock(&process_info->lock);
 
-	drm_exec_init(&exec, 0);
+	drm_exec_init(&exec, 0, 0);
 	drm_exec_until_all_locked(&exec) {
 		list_for_each_entry(peer_vm, &process_info->vm_list_head,
 				    vm_list_node) {
@@ -66,7 +66,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
 
 	amdgpu_sync_create(&p->sync);
 	drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
-		      DRM_EXEC_IGNORE_DUPLICATES);
+		      DRM_EXEC_IGNORE_DUPLICATES, 0);
 	return 0;
 }
 
@@ -70,7 +70,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	struct drm_exec exec;
 	int r;
 
-	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 	drm_exec_until_all_locked(&exec) {
 		r = amdgpu_vm_lock_pd(vm, &exec, 0);
 		if (likely(!r))
@@ -110,7 +110,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	struct drm_exec exec;
 	int r;
 
-	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 	drm_exec_until_all_locked(&exec) {
 		r = amdgpu_vm_lock_pd(vm, &exec, 0);
 		if (likely(!r))
@@ -203,7 +203,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	struct drm_exec exec;
 	long r;
 
-	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
+	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
 	drm_exec_until_all_locked(&exec) {
 		r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
 		drm_exec_retry_on_contention(&exec);
@@ -739,7 +739,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	}
 
 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
-		      DRM_EXEC_IGNORE_DUPLICATES);
+		      DRM_EXEC_IGNORE_DUPLICATES, 0);
 	drm_exec_until_all_locked(&exec) {
 		if (gobj) {
 			r = drm_exec_lock_obj(&exec, gobj);
@@ -1122,7 +1122,7 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
 
 	amdgpu_sync_create(&sync);
 
-	drm_exec_init(&exec, 0);
+	drm_exec_init(&exec, 0, 0);
 	drm_exec_until_all_locked(&exec) {
 		r = drm_exec_lock_obj(&exec,
 				      &ctx_data->meta_data_obj->tbo.base);
@@ -1193,7 +1193,7 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
 	struct drm_exec exec;
 	long r;
 
-	drm_exec_init(&exec, 0);
+	drm_exec_init(&exec, 0, 0);
 	drm_exec_until_all_locked(&exec) {
 		r = drm_exec_lock_obj(&exec,
 				      &ctx_data->meta_data_obj->tbo.base);
@@ -86,7 +86,7 @@ static int map_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 	amdgpu_sync_create(&sync);
 
-	drm_exec_init(&exec, 0);
+	drm_exec_init(&exec, 0, 0);
 	drm_exec_until_all_locked(&exec) {
 		r = drm_exec_lock_obj(&exec, &bo->tbo.base);
 		drm_exec_retry_on_contention(&exec);
@@ -149,7 +149,7 @@ static int unmap_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	struct drm_exec exec;
 	long r;
 
-	drm_exec_init(&exec, 0);
+	drm_exec_init(&exec, 0, 0);
 	drm_exec_until_all_locked(&exec) {
 		r = drm_exec_lock_obj(&exec, &bo->tbo.base);
 		drm_exec_retry_on_contention(&exec);
@@ -1529,7 +1529,7 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
 	uint32_t gpuidx;
 	int r;
 
-	drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0);
+	drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0, 0);
 	drm_exec_until_all_locked(&ctx->exec) {
 		for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
 			pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
@@ -69,16 +69,23 @@ static void drm_exec_unlock_all(struct drm_exec *exec)
  * drm_exec_init - initialize a drm_exec object
  * @exec: the drm_exec object to initialize
  * @flags: controls locking behavior, see DRM_EXEC_* defines
+ * @nr: the initial # of objects
  *
  * Initialize the object and make sure that we can track locked objects.
+ *
+ * If nr is non-zero then it is used as the initial objects table size.
+ * In either case, the table will grow (be re-allocated) on demand.
  */
-void drm_exec_init(struct drm_exec *exec, uint32_t flags)
+void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr)
 {
+	if (!nr)
+		nr = PAGE_SIZE / sizeof(void *);
+
 	exec->flags = flags;
-	exec->objects = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	exec->objects = kvmalloc_array(nr, sizeof(void *), GFP_KERNEL);
 
 	/* If allocation here fails, just delay that till the first use */
-	exec->max_objects = exec->objects ? PAGE_SIZE / sizeof(void *) : 0;
+	exec->max_objects = exec->objects ? nr : 0;
 	exec->num_objects = 0;
 	exec->contended = DRM_EXEC_DUMMY;
 	exec->prelocked = NULL;
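As the updated kernel-doc above notes, nr == 0 falls back to the old default of one page worth of pointers (512 entries with 4 KiB pages on a 64-bit build), and the table still grows on demand either way. The switch from kmalloc(PAGE_SIZE) to kvmalloc_array() also means a large initial nr can fall back to vmalloc rather than requiring a physically contiguous allocation. A stand-alone userspace sketch of just the sizing rule, mirroring the hunk above (PAGE_SIZE is hard-coded here purely for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed 4 KiB page for this example */

/* Same rule as the new drm_exec_init(): 0 selects the one-page default. */
static unsigned long initial_table_entries(unsigned long nr)
{
	if (!nr)
		nr = PAGE_SIZE / sizeof(void *);
	return nr;
}

int main(void)
{
	printf("%lu\n", initial_table_entries(0));	/* 512 on LP64 */
	printf("%lu\n", initial_table_entries(17));	/* 17 */
	return 0;
}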
@@ -1250,7 +1250,7 @@ drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec)
 	unsigned int num_fences = vm_exec->num_fences;
 	int ret;
 
-	drm_exec_init(exec, vm_exec->flags);
+	drm_exec_init(exec, vm_exec->flags, 0);
 
 	drm_exec_until_all_locked(exec) {
 		ret = drm_gpuvm_prepare_vm(gpuvm, exec, num_fences);
@@ -1341,7 +1341,7 @@ drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
 	struct drm_exec *exec = &vm_exec->exec;
 	int ret;
 
-	drm_exec_init(exec, vm_exec->flags);
+	drm_exec_init(exec, vm_exec->flags, 0);
 
 	drm_exec_until_all_locked(exec) {
 		ret = drm_gpuvm_prepare_range(gpuvm, exec, addr, range,
@@ -746,7 +746,7 @@ pvr_submit_jobs(struct pvr_device *pvr_dev, struct pvr_file *pvr_file,
 	if (err)
 		goto out_job_data_cleanup;
 
-	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES);
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES, 0);
 
 	xa_init_flags(&signal_array, XA_FLAGS_ALLOC);
 
@@ -1347,7 +1347,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
 		}
 	}
 
-	drm_exec_init(exec, vme->flags);
+	drm_exec_init(exec, vme->flags, 0);
 	drm_exec_until_all_locked(exec) {
 		ret = bind_lock_validate(job, exec, vme->num_fences);
 		drm_exec_retry_on_contention(exec);
@@ -46,7 +46,7 @@ static void sanitycheck(struct kunit *test)
 {
 	struct drm_exec exec;
 
-	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 	drm_exec_fini(&exec);
 	KUNIT_SUCCEED(test);
 }
@@ -60,7 +60,7 @@ static void test_lock(struct kunit *test)
 
 	drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
 
-	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 	drm_exec_until_all_locked(&exec) {
 		ret = drm_exec_lock_obj(&exec, &gobj);
 		drm_exec_retry_on_contention(&exec);
@@ -80,7 +80,7 @@ static void test_lock_unlock(struct kunit *test)
 
 	drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
 
-	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 	drm_exec_until_all_locked(&exec) {
 		ret = drm_exec_lock_obj(&exec, &gobj);
 		drm_exec_retry_on_contention(&exec);
@@ -107,7 +107,7 @@ static void test_duplicates(struct kunit *test)
 
 	drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
 
-	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
+	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
 	drm_exec_until_all_locked(&exec) {
 		ret = drm_exec_lock_obj(&exec, &gobj);
 		drm_exec_retry_on_contention(&exec);
@@ -134,7 +134,7 @@ static void test_prepare(struct kunit *test)
 
 	drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
 
-	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 	drm_exec_until_all_locked(&exec) {
 		ret = drm_exec_prepare_obj(&exec, &gobj, 1);
 		drm_exec_retry_on_contention(&exec);
@@ -159,7 +159,7 @@ static void test_prepare_array(struct kunit *test)
 	drm_gem_private_object_init(priv->drm, &gobj1, PAGE_SIZE);
 	drm_gem_private_object_init(priv->drm, &gobj2, PAGE_SIZE);
 
-	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 	drm_exec_until_all_locked(&exec)
 		ret = drm_exec_prepare_array(&exec, array, ARRAY_SIZE(array),
 					     1);
@@ -174,14 +174,14 @@ static void test_multiple_loops(struct kunit *test)
 {
 	struct drm_exec exec;
 
-	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 	drm_exec_until_all_locked(&exec)
 	{
 		break;
 	}
 	drm_exec_fini(&exec);
 
-	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 	drm_exec_until_all_locked(&exec)
 	{
 		break;
@@ -135,7 +135,7 @@ static inline bool drm_exec_is_contended(struct drm_exec *exec)
 	return !!exec->contended;
 }
 
-void drm_exec_init(struct drm_exec *exec, uint32_t flags);
+void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr);
 void drm_exec_fini(struct drm_exec *exec);
 bool drm_exec_cleanup(struct drm_exec *exec);
 int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj);