drm/amdgpu: grab VMID before submitting job v5
This allows the scheduler to handle the dependencies on ID contention as well.

v2: grab id only once
v3: use a separate lock for the VMIDs
v4: cleanup after semaphore removal
v5: minor coding style change

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 8d0a7cea82
parent 165e4e07c2
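For context: the amd GPU scheduler calls a job's dependency callback repeatedly and waits on every fence it returns; the job is only pushed to the hardware ring once the callback returns NULL. Moving VMID allocation into that callback, as this patch does, turns ID contention into just another dependency. Below is a minimal sketch of that consumer loop, with a hypothetical helper name and a blocking wait for illustration only (the real scheduler core, amd_sched_entity_pop_job(), arms a fence callback and re-queues the entity instead of blocking):

/*
 * Sketch only, assuming the amd_sched_backend_ops/struct fence API of this
 * kernel era. wait_for_job_dependencies() is a made-up helper name; the
 * real scheduler uses fence_add_callback() rather than fence_wait().
 */
static void wait_for_job_dependencies(struct amd_gpu_scheduler *sched,
                                      struct amd_sched_job *sched_job)
{
        struct fence *f;

        /*
         * With this patch, amdgpu_sched_dependency() first hands out the
         * fences already in the IB's sync object; once that is drained it
         * grabs a VMID under vm_manager.lock, and the fences added by
         * amdgpu_vm_grab_id() come back on the following calls.
         */
        while ((f = sched->ops->dependency(sched_job)) != NULL) {
                fence_wait(f, false);   /* real code: fence_add_callback() */
                fence_put(f);
        }
        /* all dependencies signaled, the job may be pushed to the ring */
}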
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -777,6 +777,7 @@ struct amdgpu_ib {
 	struct amdgpu_ring *ring;
 	struct amdgpu_fence *fence;
 	struct amdgpu_user_fence *user;
+	bool grabbed_vmid;
 	struct amdgpu_vm *vm;
 	struct amdgpu_ctx *ctx;
 	struct amdgpu_sync sync;
@@ -925,6 +926,9 @@ struct amdgpu_vm {
 };
 
 struct amdgpu_vm_manager {
+	/* protecting IDs */
+	struct mutex lock;
+
 	struct {
 		struct fence *active;
 		atomic_long_t owner;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1456,6 +1456,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	/* mutex initialization are all done here so we
 	 * can recall function without having locking issues */
 	mutex_init(&adev->ring_lock);
+	mutex_init(&adev->vm_manager.lock);
 	atomic_set(&adev->irq.ih.lock, 0);
 	mutex_init(&adev->gem.mutex);
 	mutex_init(&adev->pm.mutex);
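The new vm_manager.lock (the "separate lock for the VMIDs" from the v3 changelog) serializes ID assignment: dependency callbacks run per entity, so without it two jobs could race inside amdgpu_vm_grab_id() and fence the same ID.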
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -142,21 +142,17 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 		return -EINVAL;
 	}
 
+	if (vm && !ibs->grabbed_vmid) {
+		dev_err(adev->dev, "VM IB without ID\n");
+		return -EINVAL;
+	}
+
 	r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
 	if (r) {
 		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
 		return r;
 	}
 
-	if (vm) {
-		/* grab a vm id if necessary */
-		r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync);
-		if (r) {
-			amdgpu_ring_unlock_undo(ring);
-			return r;
-		}
-	}
-
 	r = amdgpu_sync_wait(&ibs->sync);
 	if (r) {
 		amdgpu_ring_unlock_undo(ring);
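amdgpu_ib_schedule() no longer grabs the ID itself: by the time a VM IB reaches submission the scheduler must already have done so, hence the new sanity check that rejects VM IBs without an ID.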
@@ -207,9 +203,6 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 			       AMDGPU_FENCE_FLAG_64BIT);
 	}
 
-	if (ib->vm)
-		amdgpu_vm_fence(adev, ib->vm, &ib->fence->base);
-
 	amdgpu_ring_unlock_commit(ring);
 	return 0;
 }
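The amdgpu_vm_fence() call disappears from the submission path as well, since the ID is now fenced at grab time in the scheduler's dependency callback below.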
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -31,7 +31,31 @@
 static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
 {
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
-	return amdgpu_sync_get_fence(&job->ibs->sync);
+	struct amdgpu_sync *sync = &job->ibs->sync;
+	struct amdgpu_vm *vm = job->ibs->vm;
+
+	struct fence *fence = amdgpu_sync_get_fence(sync);
+
+	if (fence == NULL && vm && !job->ibs->grabbed_vmid) {
+		struct amdgpu_ring *ring = job->ibs->ring;
+		struct amdgpu_device *adev = ring->adev;
+		int r;
+
+		mutex_lock(&adev->vm_manager.lock);
+		r = amdgpu_vm_grab_id(vm, ring, sync);
+		if (r) {
+			DRM_ERROR("Error getting VM ID (%d)\n", r);
+		} else {
+			fence = &job->base.s_fence->base;
+			amdgpu_vm_fence(ring->adev, vm, fence);
+			job->ibs->grabbed_vmid = true;
+		}
+		mutex_unlock(&adev->vm_manager.lock);
+
+		fence = amdgpu_sync_get_fence(sync);
+	}
+
+	return fence;
 }
 
 static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
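Note the fence choice in the grab path: the job's own scheduler fence (&job->base.s_fence->base) is what amdgpu_vm_fence() attaches to the VMID, so the ID stays owned until this very job completes. The value actually returned to the scheduler is then re-fetched from the sync object, which at that point holds whatever fences amdgpu_vm_grab_id() found still pending on the chosen ID.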