drm/amdgpu: use a fence array for VMID management
Just wait for any fence to become available, instead of waiting
for the last entry of the LRU.

Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 1fbb2e9299 (parent 8d76001e00)
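In outline, the change works like this (a condensed paraphrase of the amdgpu_vm_grab_id() hunk further down, with locking and error paths trimmed and kmalloc_array() shown with the conventional argument order): instead of blocking on the oldest LRU entry, collect the first unsignaled fence of every busy VMID and wait on a fence array that signals as soon as any of them does.

	struct fence **fences;
	unsigned i = 0, j;

	fences = kmalloc_array(adev->vm_manager.num_ids, sizeof(void *),
			       GFP_KERNEL);

	/* One unsignaled fence per busy VMID; NULL means this VMID is idle */
	list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
		if (!fences[i])
			break;			/* found an idle VMID, use it */
		++i;
	}

	if (&idle->list == &adev->vm_manager.ids_lru) {
		/* Nothing idle: wait until ANY VMID frees up, not the oldest */
		struct fence_array *array;

		for (j = 0; j < i; ++j)
			fence_get(fences[j]);	/* the array takes these refs */

		array = fence_array_create(i, fences, fence_context, seqno,
					   true /* signal_on_any */);
		amdgpu_sync_fence(ring->adev, sync, &array->base);
		fence_put(&array->base);
	}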
drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -597,10 +597,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
 		     struct reservation_object *resv,
 		     void *owner);
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync,
-			 struct amdgpu_ring *ring);
-int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
-			     struct fence *fence);
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+				     struct amdgpu_ring *ring);
 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
 int amdgpu_sync_init(void);
@@ -909,6 +907,10 @@ struct amdgpu_vm_manager {
 	struct list_head		ids_lru;
 	struct amdgpu_vm_id		ids[AMDGPU_NUM_VM];
 
+	/* Handling of VM fences */
+	u64				fence_context;
+	unsigned			seqno[AMDGPU_MAX_RINGS];
+
 	uint32_t			max_pfn;
 	/* vram base address for page table entry */
 	u64				vram_base_offset;
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -166,7 +166,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 	}
 	job = to_amdgpu_job(sched_job);
 
-	BUG_ON(!amdgpu_sync_is_idle(&job->sync, NULL));
+	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
 	trace_amdgpu_sched_run_job(job);
 	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
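The two assertions are equivalent: amdgpu_sync_is_idle(sync, NULL) returning true is exactly amdgpu_sync_peek_fence(sync, NULL) returning NULL, i.e. there is no unsignaled fence left to report.

	/* old:  BUG_ON(!amdgpu_sync_is_idle(&job->sync, NULL));
	 * new:  BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
	 *
	 * peek_fence() == NULL  <=>  every fence in the sync object is
	 * signaled, so both trip when the job still has unsatisfied
	 * dependencies.
	 */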
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -223,16 +223,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_sync_is_idle - test if all fences are signaled
+ * amdgpu_sync_peek_fence - get the next fence not signaled yet
  *
  * @sync: the sync object
  * @ring: optional ring to use for test
  *
- * Returns true if all fences in the sync object are signaled or scheduled to
- * the ring (if provided).
+ * Returns the next fence not signaled yet without removing it from the sync
+ * object.
  */
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync,
-			 struct amdgpu_ring *ring)
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+				     struct amdgpu_ring *ring)
 {
 	struct amdgpu_sync_entry *e;
 	struct hlist_node *tmp;
@@ -246,9 +246,12 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync,
 			/* For fences from the same ring it is sufficient
 			 * when they are scheduled.
 			 */
-			if (s_fence->sched == &ring->sched &&
-			    fence_is_signaled(&s_fence->scheduled))
-				continue;
+			if (s_fence->sched == &ring->sched) {
+				if (fence_is_signaled(&s_fence->scheduled))
+					continue;
+
+				return &s_fence->scheduled;
+			}
 		}
 
 		if (fence_is_signaled(f)) {
@@ -258,56 +261,10 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync,
 			continue;
 		}
 
-		return false;
+		return f;
 	}
 
-	return true;
-}
-
-/**
- * amdgpu_sync_cycle_fences - move fences from one sync object into another
- *
- * @dst: the destination sync object
- * @src: the source sync object
- * @fence: fence to add to source
- *
- * Remove all fences from source and put them into destination and add
- * fence as new one into source.
- */
-int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
-			     struct fence *fence)
-{
-	struct amdgpu_sync_entry *e, *newone;
-	struct hlist_node *tmp;
-	int i;
-
-	/* Allocate the new entry before moving the old ones */
-	newone = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
-	if (!newone)
-		return -ENOMEM;
-
-	hash_for_each_safe(src->fences, i, tmp, e, node) {
-		struct fence *f = e->fence;
-
-		hash_del(&e->node);
-		if (fence_is_signaled(f)) {
-			fence_put(f);
-			kmem_cache_free(amdgpu_sync_slab, e);
-			continue;
-		}
-
-		if (amdgpu_sync_add_later(dst, f)) {
-			kmem_cache_free(amdgpu_sync_slab, e);
-			continue;
-		}
-
-		hash_add(dst->fences, &e->node, f->context);
-	}
-
-	hash_add(src->fences, &newone->node, fence->context);
-	newone->fence = fence_get(fence);
-
-	return 0;
+	return NULL;
 }
 
 /**
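Callers treat the return value as a borrowed pointer: NULL means the sync object is idle, anything else is the fence to wait on (or to collect into a fence array). A minimal sketch of the pattern, where handle_busy_fence() is a hypothetical helper and the caller must fence_get() the result if it keeps it:

	struct fence *f = amdgpu_sync_peek_fence(&id->active, ring);

	if (!f) {
		/* every fence is signaled (or merely scheduled on this
		 * ring): the VMID is free for reuse */
	} else {
		/* f is borrowed, not referenced: take a reference first */
		fence_get(f);
		handle_busy_fence(f);	/* hypothetical helper */
	}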
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -25,6 +25,7 @@
  *          Alex Deucher
  *          Jerome Glisse
  */
+#include <linux/fence-array.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
@@ -180,82 +181,116 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	struct amdgpu_device *adev = ring->adev;
 	struct fence *updates = sync->last_vm_update;
 	struct amdgpu_vm_id *id, *idle;
-	unsigned i = ring->idx;
-	int r;
+	struct fence **fences;
+	unsigned i;
+	int r = 0;
+
+	fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
+			       GFP_KERNEL);
+	if (!fences)
+		return -ENOMEM;
 
 	mutex_lock(&adev->vm_manager.lock);
 
 	/* Check if we have an idle VMID */
+	i = 0;
 	list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
-		if (amdgpu_sync_is_idle(&idle->active, ring))
+		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
+		if (!fences[i])
 			break;
+		++i;
 	}
 
-	/* If we can't find a idle VMID to use, just wait for the oldest */
+	/* If we can't find a idle VMID to use, wait till one becomes available */
 	if (&idle->list == &adev->vm_manager.ids_lru) {
-		id = list_first_entry(&adev->vm_manager.ids_lru,
-				      struct amdgpu_vm_id,
-				      list);
-	} else {
-		/* Check if we can use a VMID already assigned to this VM */
-		do {
-			struct fence *flushed;
-
-			id = vm->ids[i++];
-			if (i == AMDGPU_MAX_RINGS)
-				i = 0;
-
-			/* Check all the prerequisites to using this VMID */
-			if (!id)
-				continue;
-
-			if (atomic64_read(&id->owner) != vm->client_id)
-				continue;
-
-			if (pd_addr != id->pd_gpu_addr)
-				continue;
-
-			if (id->last_user != ring && (!id->last_flush ||
-			    !fence_is_signaled(id->last_flush)))
-				continue;
-
-			flushed = id->flushed_updates;
-			if (updates && (!flushed ||
-			    fence_is_later(updates, flushed)))
-				continue;
-
-			/* Good we can use this VMID */
-			if (id->last_user == ring) {
-				r = amdgpu_sync_fence(ring->adev, sync,
-						      id->first);
-				if (r)
-					goto error;
-			}
-
-			/* And remember this submission as user of the VMID */
-			r = amdgpu_sync_fence(ring->adev, &id->active, fence);
-			if (r)
-				goto error;
-
-			list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-			vm->ids[ring->idx] = id;
-
-			*vm_id = id - adev->vm_manager.ids;
-			*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
-			trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
-						*vm_pd_addr);
-
-			mutex_unlock(&adev->vm_manager.lock);
-			return 0;
-
-		} while (i != ring->idx);
+		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
+		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
+		struct fence_array *array;
+		unsigned j;
+
+		for (j = 0; j < i; ++j)
+			fence_get(fences[j]);
+
+		array = fence_array_create(i, fences, fence_context,
+					   seqno, true);
+		if (!array) {
+			for (j = 0; j < i; ++j)
+				fence_put(fences[j]);
+			kfree(fences);
+			r = -ENOMEM;
+			goto error;
+		}
+
+		r = amdgpu_sync_fence(ring->adev, sync, &array->base);
+		fence_put(&array->base);
+		if (r)
+			goto error;
+
+		mutex_unlock(&adev->vm_manager.lock);
+		return 0;
+
+	}
+	kfree(fences);
 
-		/* Still no ID to use? Then use the idle one found earlier */
-		id = idle;
-	}
+	/* Check if we can use a VMID already assigned to this VM */
+	i = ring->idx;
+	do {
+		struct fence *flushed;
+
+		id = vm->ids[i++];
+		if (i == AMDGPU_MAX_RINGS)
+			i = 0;
+
+		/* Check all the prerequisites to using this VMID */
+		if (!id)
+			continue;
+
+		if (atomic64_read(&id->owner) != vm->client_id)
+			continue;
+
+		if (pd_addr != id->pd_gpu_addr)
+			continue;
+
+		if (id->last_user != ring &&
+		    (!id->last_flush || !fence_is_signaled(id->last_flush)))
+			continue;
+
+		flushed = id->flushed_updates;
+		if (updates &&
+		    (!flushed || fence_is_later(updates, flushed)))
+			continue;
+
+		/* Good we can use this VMID */
+		if (id->last_user == ring) {
+			r = amdgpu_sync_fence(ring->adev, sync,
+					      id->first);
+			if (r)
+				goto error;
+		}
+
+		/* And remember this submission as user of the VMID */
+		r = amdgpu_sync_fence(ring->adev, &id->active, fence);
+		if (r)
+			goto error;
+
+		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+		vm->ids[ring->idx] = id;
+
+		*vm_id = id - adev->vm_manager.ids;
+		*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
+		trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
+
+		mutex_unlock(&adev->vm_manager.lock);
+		return 0;
+
+	} while (i != ring->idx);
 
-	r = amdgpu_sync_cycle_fences(sync, &id->active, fence);
+	/* Still no ID to use? Then use the idle one found earlier */
+	id = idle;
+
+	/* Remember this submission as user of the VMID */
+	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
 	if (r)
 		goto error;
 
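Two ownership details in the hunk above are easy to miss, so here is the reference flow spelled out (a sketch of the fence-array contract as this commit uses it, not a general API guide): the fences returned by amdgpu_sync_peek_fence() are borrowed, and fence_array_create() takes over both the references and the fences[] storage on success.

	for (j = 0; j < i; ++j)
		fence_get(fences[j]);	/* peek returned borrowed pointers */

	array = fence_array_create(i, fences, fence_context, seqno, true);
	if (!array) {
		/* creation failed: we still own the refs and the storage */
		for (j = 0; j < i; ++j)
			fence_put(fences[j]);
		kfree(fences);
		return -ENOMEM;
	}
	/* success: the fence array now owns fences[] and its references,
	 * so the caller only drops its own reference on the array itself */
	fence_put(&array->base);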
@@ -1515,6 +1550,10 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 			      &adev->vm_manager.ids_lru);
 	}
 
+	adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		adev->vm_manager.seqno[i] = 0;
+
 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
 	atomic64_set(&adev->vm_manager.client_counter, 0);
 }
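fence_context_alloc() hands out a block of consecutive context numbers, so reserving AMDGPU_MAX_RINGS of them up front gives every ring its own context and the (context, seqno) pair of each wait array stays unique. A sketch of how the amdgpu_vm_grab_id() hunk derives the pair, using the names from this diff:

	/* ring->idx selects the per-ring context; the per-ring seqno makes
	 * each successive wait array strictly newer than the previous one */
	u64 fence_context = adev->vm_manager.fence_context + ring->idx;
	unsigned seqno = ++adev->vm_manager.seqno[ring->idx];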