IB/core: Enable ODP sync without faulting

Enable ODP sync without faulting; this improves performance by reducing
the number of page faults in the system.

The gain from this option is that the device page table can be aligned
with the pages currently present in the CPU page table without causing
page faults.

As a result, the data-path overhead of the hardware triggering a fault,
which ends up calling the driver to bring in the pages, is eliminated.

Link: https://lore.kernel.org/r/20200930163828.1336747-3-leon@kernel.org
Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Yishai Hadas authored 2020-09-30 19:38:26 +03:00, committed by Jason Gunthorpe
parent 36f30e486d
commit 8bfafde086
3 changed files with 27 additions and 12 deletions
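
For illustration, a minimal sketch (not part of this patch) of how a driver-side
caller could use the two modes of the updated helper: with fault set to true the
existing behaviour is kept and hmm_range_fault() faults in non-present pages,
while with fault set to false only pages already present in the CPU page table
are mirrored. The example_map_range() wrapper and its surroundings are assumed
for the sketch; only the ib_umem_odp_map_dma_and_lock() signature, the ODP
access bits, and the umem_mutex locking convention come from the patch and the
existing API.

#include <rdma/ib_umem_odp.h>

/* Hypothetical wrapper, not part of the patch: map a user range either by
 * faulting pages in (page-fault path) or by syncing only what is already
 * present in the CPU page table (non-faulting path).
 */
static int example_map_range(struct ib_umem_odp *odp, u64 user_va, u64 bcnt,
			     bool fault)
{
	u64 access_mask = ODP_READ_ALLOWED_BIT;
	int np;

	if (odp->umem.writable)
		access_mask |= ODP_WRITE_ALLOWED_BIT;

	/* Returns the number of mapped pages and leaves umem_mutex held on
	 * success; with fault == false, non-present pages are simply skipped.
	 */
	np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask,
					  fault);
	if (np < 0)
		return np;

	/* ... program the device page table (MTTs) from odp->dma_list ... */

	mutex_unlock(&odp->umem_mutex);
	return np;
}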

--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c

@@ -347,9 +347,10 @@ static int ib_umem_odp_map_dma_single_page(
  * the return value.
  * @access_mask: bit mask of the requested access permissions for the given
  * range.
+ * @fault: is faulting required for the given range
  */
 int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
-				 u64 bcnt, u64 access_mask)
+				 u64 bcnt, u64 access_mask, bool fault)
 	__acquires(&umem_odp->umem_mutex)
 {
 	struct task_struct *owning_process = NULL;
@@ -385,10 +386,12 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
 	range.end = ALIGN(user_virt + bcnt, 1UL << page_shift);
 	pfn_start_idx = (range.start - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
 	num_pfns = (range.end - range.start) >> PAGE_SHIFT;
-	range.default_flags = HMM_PFN_REQ_FAULT;
+	if (fault) {
+		range.default_flags = HMM_PFN_REQ_FAULT;
 
-	if (access_mask & ODP_WRITE_ALLOWED_BIT)
-		range.default_flags |= HMM_PFN_REQ_WRITE;
+		if (access_mask & ODP_WRITE_ALLOWED_BIT)
+			range.default_flags |= HMM_PFN_REQ_WRITE;
+	}
 
 	range.hmm_pfns = &(umem_odp->pfn_list[pfn_start_idx]);
 	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
@@ -417,12 +420,24 @@ retry:
 	for (pfn_index = 0; pfn_index < num_pfns;
 	     pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) {
 
-		/*
-		 * Since we asked for hmm_range_fault() to populate pages,
-		 * it shouldn't return an error entry on success.
-		 */
-		WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
-		WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
+		if (fault) {
+			/*
+			 * Since we asked for hmm_range_fault() to populate
+			 * pages it shouldn't return an error entry on success.
+			 */
+			WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
+			WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
+		} else {
+			if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) {
+				WARN_ON(umem_odp->dma_list[dma_index]);
+				continue;
+			}
+			access_mask = ODP_READ_ALLOWED_BIT;
+			if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE)
+				access_mask |= ODP_WRITE_ALLOWED_BIT;
+		}
+
 		hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
 		/* If a hugepage was detected and ODP wasn't set for, the umem
 		 * page_shift will be used, the opposite case is an error.

--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c

@@ -681,7 +681,7 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
 	if (odp->umem.writable && !downgrade)
 		access_mask |= ODP_WRITE_ALLOWED_BIT;
 
-	np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask);
+	np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, true);
 	if (np < 0)
 		return np;

--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h

@@ -94,7 +94,7 @@ ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr,
 void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
 int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 start_offset,
-				 u64 bcnt, u64 access_mask);
+				 u64 bcnt, u64 access_mask, bool fault);
 void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
 				 u64 bound);