drm/amdkfd: keep unregistered svm ranges from overlapping TTM ranges

When creating a new unregistered svm range to recover a retry fault, make
sure the new range does not overlap with ranges or userptr ranges already
managed by TTM; otherwise svm migration would trigger TTM or userptr
eviction and evict user queues unexpectedly.

Change the helper amdgpu_ttm_tt_affect_userptr to return the userptr that
falls inside the given range. Add the helper svm_range_check_vm_userptr to
scan all userptrs of the vm and return the overlapping userptr bo's start
and last address.

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 43fc10c187 (parent f23750b5b3)
Author:    Philip Yang
Date:      2021-10-11 23:12:25 -04:00
Committer: Alex Deucher

3 files changed, 95 insertions(+), 8 deletions(-)
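
The policy this patch implements can be summarized stand-alone: when the
candidate range for a retry fault overlaps an existing TTM or userptr
mapping, fail if the fault address itself lies inside that mapping, and
otherwise shrink the new range to the single faulting page. A minimal
sketch, assuming page-granular addresses and hypothetical stand-alone
types (the kernel code operates on struct kfd_process and per-GPU
amdgpu VMs):

#include <stdbool.h>
#include <stdint.h>

struct page_range { uint64_t start, last; };	/* inclusive, in pages */

/* Hypothetical distillation of the overlap policy: 'cand' is the
 * candidate boundary from svm_range_get_range_boundaries(), 'bo' the
 * overlapping mapping reported back via bo_s/bo_l. */
static bool pick_unregistered_range(uint64_t addr, struct page_range *cand,
				    const struct page_range *bo)
{
	if (addr >= bo->start && addr <= bo->last)
		return false;	/* fault address already mapped: give up */

	/* Overlap elsewhere in the region: create a one-page range. */
	cand->start = addr;
	cand->last = addr;
	return true;
}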

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -1235,7 +1235,7 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
  *
  */
 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
-				  unsigned long end)
+				  unsigned long end, unsigned long *userptr)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	unsigned long size;

@@ -1250,6 +1250,8 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 	if (gtt->userptr > end || gtt->userptr + size <= start)
 		return false;
 
+	if (userptr)
+		*userptr = gtt->userptr;
 	return true;
 }
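
With the extra out-parameter, existing callers that only need the overlap
test can pass NULL, while the new svm code also retrieves the base address
of the overlapping userptr. An illustrative caller, not part of the patch:

/* Illustrative only: report the userptr base of a bo whose user pages
 * overlap [start, end). Assumes bo->tbo.ttm is a userptr ttm_tt. */
static void report_userptr_overlap(struct amdgpu_bo *bo,
				   unsigned long start, unsigned long end)
{
	unsigned long userptr;

	if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end, &userptr))
		return;		/* no overlap */

	pr_debug("bo overlaps userptr range starting at 0x%lx\n", userptr);
}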

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h

@@ -182,7 +182,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
 bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
-				  unsigned long end);
+				  unsigned long end, unsigned long *userptr);
 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
 				       int *last_invalidated);
 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);

drivers/gpu/drm/amd/amdkfd/kfd_svm.c

@@ -50,7 +50,9 @@ static bool
 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
 				    const struct mmu_notifier_range *range,
 				    unsigned long cur_seq);
-
+static int
+svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
+		   uint64_t *bo_s, uint64_t *bo_l);
 static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
 	.invalidate = svm_range_cpu_invalidate_pagetables,
 };
@@ -2308,6 +2310,7 @@ svm_range_best_restore_location(struct svm_range *prange,
 	return -1;
 }
 
+
 static int
 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
 			       unsigned long *start, unsigned long *last)
@@ -2355,8 +2358,59 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
 		 vma->vm_end >> PAGE_SHIFT, *last);
 
 	return 0;
 }
 
+static int
+svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
+			   uint64_t *bo_s, uint64_t *bo_l)
+{
+	struct amdgpu_bo_va_mapping *mapping;
+	struct interval_tree_node *node;
+	struct amdgpu_bo *bo = NULL;
+	unsigned long userptr;
+	uint32_t i;
+	int r;
+
+	for (i = 0; i < p->n_pdds; i++) {
+		struct amdgpu_vm *vm;
+
+		if (!p->pdds[i]->drm_priv)
+			continue;
+
+		vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
+		r = amdgpu_bo_reserve(vm->root.bo, false);
+		if (r)
+			return r;
+
+		/* Check userptr by searching entire vm->va interval tree */
+		node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
+		while (node) {
+			mapping = container_of((struct rb_node *)node,
+					       struct amdgpu_bo_va_mapping, rb);
+			bo = mapping->bo_va->base.bo;
+
+			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
+							 start << PAGE_SHIFT,
+							 last << PAGE_SHIFT,
+							 &userptr)) {
+				node = interval_tree_iter_next(node, 0, ~0ULL);
+				continue;
+			}
+
+			pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
+				 start, last);
+			if (bo_s && bo_l) {
+				*bo_s = userptr >> PAGE_SHIFT;
+				*bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
+			}
+			amdgpu_bo_unreserve(vm->root.bo);
+			return -EADDRINUSE;
+		}
+		amdgpu_bo_unreserve(vm->root.bo);
+	}
+
+	return 0;
+}
+
 static struct
 svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
 						struct kfd_process *p,
@@ -2366,10 +2420,26 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
 	struct svm_range *prange = NULL;
 	unsigned long start, last;
 	uint32_t gpuid, gpuidx;
+	uint64_t bo_s = 0;
+	uint64_t bo_l = 0;
+	int r;
 
 	if (svm_range_get_range_boundaries(p, addr, &start, &last))
 		return NULL;
 
+	r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
+	if (r != -EADDRINUSE)
+		r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
+	if (r == -EADDRINUSE) {
+		if (addr >= bo_s && addr <= bo_l)
+			return NULL;
+
+		/* Create one page svm range if 2MB range overlapping */
+		start = addr;
+		last = addr;
+	}
+
 	prange = svm_range_new(&p->svms, start, last);
 	if (!prange) {
 		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
@@ -2672,6 +2742,8 @@ int svm_range_list_init(struct kfd_process *p)
  * @p: current kfd_process
  * @start: range start address, in pages
  * @last: range last address, in pages
+ * @bo_s: mapping start address in pages if address range already mapped
+ * @bo_l: mapping last address in pages if address range already mapped
  *
  * The purpose is to avoid virtual address ranges already allocated by
  * kfd_ioctl_alloc_memory_of_gpu ioctl.
@@ -2686,8 +2758,11 @@ int svm_range_list_init(struct kfd_process *p)
  * a signal. Release all buffer reservations and return to user-space.
  */
 static int
-svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last)
+svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
+		   uint64_t *bo_s, uint64_t *bo_l)
 {
+	struct amdgpu_bo_va_mapping *mapping;
+	struct interval_tree_node *node;
 	uint32_t i;
 	int r;
@@ -2701,8 +2776,17 @@ svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last)
 		r = amdgpu_bo_reserve(vm->root.bo, false);
 		if (r)
 			return r;
-		if (interval_tree_iter_first(&vm->va, start, last)) {
-			pr_debug("Range [0x%llx 0x%llx] already mapped\n", start, last);
+
+		node = interval_tree_iter_first(&vm->va, start, last);
+		if (node) {
+			pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
+				 start, last);
+			mapping = container_of((struct rb_node *)node,
+					       struct amdgpu_bo_va_mapping, rb);
+			if (bo_s && bo_l) {
+				*bo_s = mapping->start;
+				*bo_l = mapping->last;
+			}
 			amdgpu_bo_unreserve(vm->root.bo);
 			return -EADDRINUSE;
 		}
@@ -2743,7 +2827,8 @@ svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
 		start = min(end, vma->vm_end);
 	} while (start < end);
 
-	return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT);
+	return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
+				  NULL);
 }
 
 /**
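
Taken together, fault recovery now performs a two-stage check.
svm_range_check_vm can use a plain interval query because ranges
allocated through kfd_ioctl_alloc_memory_of_gpu are keyed in the vm->va
tree by the addresses being faulted; userptr bos, by contrast, are keyed
by their GPU mapping address rather than the CPU address of the user
pages, so svm_range_check_vm_userptr must walk the whole tree and test
each bo with amdgpu_ttm_tt_affect_userptr. A hedged sketch of the
combined sequence (hypothetical wrapper, not in the patch; error
handling elided):

static int check_new_range(struct kfd_process *p, uint64_t start,
			   uint64_t last, uint64_t *bo_s, uint64_t *bo_l)
{
	int r;

	/* Fast path: interval query by address in each GPU VM. */
	r = svm_range_check_vm(p, start, last, bo_s, bo_l);
	if (r != -EADDRINUSE)
		/* Slow path: linear scan for userptr overlaps. */
		r = svm_range_check_vm_userptr(p, start, last, bo_s, bo_l);

	return r;	/* -EADDRINUSE with bo_s/bo_l filled on overlap */
}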