drm/amdgpu: use kernel fence for last_pt_update
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <jammy.zhou@amd.com>
parent e40a31159b
commit bb1e38a4be
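In short: bo_va->last_pt_update and the fence pointers threaded through the VM mapping paths change type from the driver-specific struct amdgpu_fence * to the kernel's generic struct fence *, and reference counting moves from amdgpu_fence_ref()/amdgpu_fence_unref() to the generic fence_get()/fence_put(). This works because the driver fence embeds the generic one; a minimal sketch of that layout, reconstructed from the driver code of this era rather than taken from this diff:

/* Assumed layout (not part of this patch): the amdgpu fence wraps the
 * generic kernel fence, so &af->base up-casts to struct fence and the
 * kref-based helpers in linux/fence.h apply to it. */
#include <linux/fence.h>

struct amdgpu_fence {
	struct fence base;	/* generic, refcounted part */
	/* driver-specific members (owning ring, seq, ...) elided */
};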
drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -539,7 +539,7 @@ struct amdgpu_bo_va_mapping {
 struct amdgpu_bo_va {
 	/* protected by bo being reserved */
 	struct list_head		bo_list;
-	struct amdgpu_fence		*last_pt_update;
+	struct fence			*last_pt_update;
 	unsigned			ref_count;
 
 	/* protected by vm mutex and spinlock */
@@ -1241,7 +1241,7 @@ union amdgpu_sched_job_param {
 		struct amdgpu_vm *vm;
 		uint64_t start;
 		uint64_t last;
-		struct amdgpu_fence **fence;
+		struct fence **fence;
 	} vm_mapping;
 	struct {
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -581,7 +581,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 		if (r)
 			return r;
 
-		f = &bo_va->last_pt_update->base;
+		f = bo_va->last_pt_update;
 		r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
 		if (r)
 			return r;
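With the field stored as a generic struct fence *, the CS path above no longer needs to up-cast through ->base before handing the fence to amdgpu_sync_fence(). Going the other way, from a generic fence back to the driver wrapper, would be a container_of(); a hedged sketch with a hypothetical helper name (the real driver additionally checks the fence ops before trusting such a cast):

#include <linux/kernel.h>
#include <linux/fence.h>

/* Hypothetical helper: recover the driver wrapper from its embedded
 * generic fence.  Only valid if 'f' really is an amdgpu fence. */
static inline struct amdgpu_fence *to_amdgpu_fence_sketch(struct fence *f)
{
	return container_of(f, struct amdgpu_fence, base);
}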
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -737,7 +737,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
  */
 static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
 				uint64_t start, uint64_t end,
-				struct amdgpu_fence *fence)
+				struct fence *fence)
 {
 	unsigned i;
 
@@ -745,20 +745,20 @@ static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
 	end >>= amdgpu_vm_block_size;
 
 	for (i = start; i <= end; ++i)
-		amdgpu_bo_fence(vm->page_tables[i].bo, &fence->base, true);
+		amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
 }
 
 static int amdgpu_vm_bo_update_mapping_run_job(
 	struct amdgpu_cs_parser *sched_job)
 {
-	struct amdgpu_fence **fence = sched_job->job_param.vm_mapping.fence;
+	struct fence **fence = sched_job->job_param.vm_mapping.fence;
 	amdgpu_vm_fence_pts(sched_job->job_param.vm_mapping.vm,
 			    sched_job->job_param.vm_mapping.start,
 			    sched_job->job_param.vm_mapping.last + 1,
-			    sched_job->ibs[sched_job->num_ibs -1].fence);
+			    &sched_job->ibs[sched_job->num_ibs -1].fence->base);
 	if (fence) {
-		amdgpu_fence_unref(fence);
-		*fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs -1].fence);
+		fence_put(*fence);
+		*fence = fence_get(&sched_job->ibs[sched_job->num_ibs -1].fence->base);
 	}
 	return 0;
 }
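This hunk, and the matching one in amdgpu_vm_bo_update_mapping() further down, swap the driver refcount helpers for the same replace-the-stored-pointer pattern: drop the reference held in the caller's slot, then store a freshly taken one. A minimal sketch of that pattern, assuming the linux/fence.h helpers of this era (both tolerate a NULL fence):

#include <linux/fence.h>

/* Illustrative only: replace the fence held in *slot with 'new',
 * keeping the reference counts balanced. */
static void fence_slot_replace(struct fence **slot, struct fence *new)
{
	fence_put(*slot);	/* drop the old reference; NULL is a no-op */
	*slot = fence_get(new);	/* take a reference and store 'new' */
}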
@@ -781,7 +781,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 			       struct amdgpu_vm *vm,
 			       struct amdgpu_bo_va_mapping *mapping,
 			       uint64_t addr, uint32_t gtt_flags,
-			       struct amdgpu_fence **fence)
+			       struct fence **fence)
 {
 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
 	unsigned nptes, ncmds, ndw;
@@ -902,10 +902,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	}
 
 	amdgpu_vm_fence_pts(vm, mapping->it.start,
-			    mapping->it.last + 1, ib->fence);
+			    mapping->it.last + 1, &ib->fence->base);
 	if (fence) {
-		amdgpu_fence_unref(fence);
-		*fence = amdgpu_fence_ref(ib->fence);
+		fence_put(*fence);
+		*fence = fence_get(&ib->fence->base);
 	}
 
 	amdgpu_ib_free(adev, ib);
@@ -1038,7 +1038,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
 	spin_unlock(&vm->status_lock);
 
 	if (bo_va)
-		r = amdgpu_sync_fence(adev, sync, &bo_va->last_pt_update->base);
+		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
 
 	return r;
 }
@@ -1318,7 +1318,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		kfree(mapping);
 	}
 
-	amdgpu_fence_unref(&bo_va->last_pt_update);
+	fence_put(bo_va->last_pt_update);
 	kfree(bo_va);
 
 	mutex_unlock(&vm->mutex);
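One nuance in this last hunk: the removed helper took a struct amdgpu_fence ** and cleared the caller's pointer after dropping the reference, while fence_put() leaves bo_va->last_pt_update dangling. That is harmless here because bo_va is freed on the next line. A rough reconstruction of the removed helper, from memory of the driver code rather than from this diff:

/* Approximate body of the removed amdgpu_fence_unref() (reconstructed,
 * may differ in detail): note it NULLs the caller's pointer, which the
 * plain fence_put() in the new code does not. */
static void amdgpu_fence_unref_sketch(struct amdgpu_fence **fence)
{
	struct amdgpu_fence *tmp = *fence;

	*fence = NULL;
	if (tmp)
		fence_put(&tmp->base);	/* drop the ref on the generic part */
}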