mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 12:11:40 +00:00
drm/xe: Convert to USM lock to rwsem
Remove contention from GPU fault path for ASID->VM lookup.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240918054436.1971839-1-matthew.brost@intel.com
(cherry picked from commit 1378c633a3)
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
This commit is contained in:
parent
cb58977016
commit
d1ef967126
@@ -339,9 +339,7 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
|
||||
|
||||
init_waitqueue_head(&xe->ufence_wq);
|
||||
|
||||
-	err = drmm_mutex_init(&xe->drm, &xe->usm.lock);
-	if (err)
-		goto err;
+	init_rwsem(&xe->usm.lock);
|
||||
xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);
|
||||
|
||||
|
@@ -369,7 +369,7 @@ struct xe_device {
|
||||
/** @usm.next_asid: next ASID, used to cyclical alloc asids */
|
||||
u32 next_asid;
|
||||
 	/** @usm.lock: protects UM state */
-	struct mutex lock;
+	struct rw_semaphore lock;
 	} usm;
|
||||
|
||||
/** @pinned: pinned BO state */
|
||||
|
@@ -198,13 +198,13 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
|
||||
return -EFAULT;
|
||||
|
||||
/* ASID to VM */
|
||||
-	mutex_lock(&xe->usm.lock);
+	down_read(&xe->usm.lock);
|
||||
vm = xa_load(&xe->usm.asid_to_vm, pf->asid);
|
||||
if (vm && xe_vm_in_fault_mode(vm))
|
||||
xe_vm_get(vm);
|
||||
else
|
||||
vm = NULL;
|
||||
-	mutex_unlock(&xe->usm.lock);
+	up_read(&xe->usm.lock);
|
||||
if (!vm)
|
||||
return -EINVAL;
|
||||
|
||||
@@ -549,11 +549,11 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
|
||||
return -EINVAL;
|
||||
|
||||
/* ASID to VM */
|
||||
-	mutex_lock(&xe->usm.lock);
+	down_read(&xe->usm.lock);
|
||||
vm = xa_load(&xe->usm.asid_to_vm, acc->asid);
|
||||
if (vm)
|
||||
xe_vm_get(vm);
|
||||
-	mutex_unlock(&xe->usm.lock);
+	up_read(&xe->usm.lock);
|
||||
if (!vm || !xe_vm_in_fault_mode(vm))
|
||||
return -EINVAL;
|
||||
|
||||
|
@@ -1613,7 +1613,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
|
||||
|
||||
up_write(&vm->lock);
|
||||
|
||||
-	mutex_lock(&xe->usm.lock);
+	down_write(&xe->usm.lock);
|
||||
if (vm->usm.asid) {
|
||||
void *lookup;
|
||||
|
||||
@@ -1623,7 +1623,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
|
||||
lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
|
||||
xe_assert(xe, lookup == vm);
|
||||
}
|
||||
-	mutex_unlock(&xe->usm.lock);
+	up_write(&xe->usm.lock);
|
||||
|
||||
for_each_tile(tile, xe, id)
|
||||
xe_range_fence_tree_fini(&vm->rftree[id]);
|
||||
@@ -1772,11 +1772,11 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
|
||||
goto err_close_and_put;
|
||||
|
||||
if (xe->info.has_asid) {
|
||||
-		mutex_lock(&xe->usm.lock);
+		down_write(&xe->usm.lock);
|
||||
err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
|
||||
XA_LIMIT(1, XE_MAX_ASID - 1),
|
||||
&xe->usm.next_asid, GFP_KERNEL);
|
||||
-		mutex_unlock(&xe->usm.lock);
+		up_write(&xe->usm.lock);
|
||||
if (err < 0)
|
||||
goto err_free_id;
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user