nouveau/svm: use the new migration invalidation
Use the new MMU_NOTIFY_MIGRATE event to skip GPU MMU invalidations of
device private memory and handle the invalidation in the driver as part
of migrating device private memory.

Link: https://lore.kernel.org/r/20200723223004.9586-5-rcampbell@nvidia.com
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit f8477ce6b5
parent 998427b3ad
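The pattern at the heart of the patch, shown as a standalone sketch before the diff itself (the helper name and the owner cookie are illustrative placeholders; struct mmu_notifier_range, its event and migrate_pgmap_owner fields, and MMU_NOTIFY_MIGRATE are the real interfaces this series introduces):

#include <linux/mmu_notifier.h>

/*
 * Sketch, not part of the patch: a driver that initiated a migration of
 * its own device private pages can recognize the resulting notifier
 * callback and skip the device-side invalidation, since it already
 * invalidated the device mapping while performing the migration. The
 * owner cookie is whatever the driver passed as migrate_vma.pgmap_owner
 * (nouveau uses drm->dev, as the diff below shows).
 */
static bool event_is_own_migration(const struct mmu_notifier_range *range,
				   void *owner)
{
	return range->event == MMU_NOTIFY_MIGRATE &&
	       range->migrate_pgmap_owner == owner;
}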
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -140,6 +140,7 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
 {
 	struct device *dev = drm->dev->dev;
 	struct page *dpage, *spage;
+	struct nouveau_svmm *svmm;
 
 	spage = migrate_pfn_to_page(args->src[0]);
 	if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
@@ -154,14 +155,19 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
 	if (dma_mapping_error(dev, *dma_addr))
 		goto error_free_page;
 
+	svmm = spage->zone_device_data;
+	mutex_lock(&svmm->mutex);
+	nouveau_svmm_invalidate(svmm, args->start, args->end);
 	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
 			NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
 		goto error_dma_unmap;
+	mutex_unlock(&svmm->mutex);
 
 	args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
 	return 0;
 
 error_dma_unmap:
+	mutex_unlock(&svmm->mutex);
 	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
 error_free_page:
 	__free_page(dpage);
@@ -531,7 +537,8 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 }
 
 static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
-		unsigned long src, dma_addr_t *dma_addr, u64 *pfn)
+		struct nouveau_svmm *svmm, unsigned long src,
+		dma_addr_t *dma_addr, u64 *pfn)
 {
 	struct device *dev = drm->dev->dev;
 	struct page *dpage, *spage;
@@ -561,6 +568,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
 		goto out_free_page;
 	}
 
+	dpage->zone_device_data = svmm;
 	*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
 		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
 	if (src & MIGRATE_PFN_WRITE)
@@ -584,8 +592,8 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
 	unsigned long addr = args->start, nr_dma = 0, i;
 
 	for (i = 0; addr < args->end; i++) {
-		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
-				dma_addrs + nr_dma, pfns + i);
+		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
+				args->src[i], dma_addrs + nr_dma, pfns + i);
 		if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
 			nr_dma++;
 		addr += PAGE_SIZE;
@@ -616,6 +624,7 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
 	struct migrate_vma args = {
 		.vma = vma,
 		.start = start,
+		.pgmap_owner = drm->dev,
 		.flags = MIGRATE_VMA_SELECT_SYSTEM,
 	};
 	unsigned long i;
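For context, a condensed sketch of the migration side that sets the owner cookie (field names follow the v5.9-era struct migrate_vma; the function name and error handling are illustrative, not nouveau's exact code):

#include <linux/migrate.h>

/*
 * Sketch: setting pgmap_owner when starting a migration is what stamps
 * the resulting MMU_NOTIFY_MIGRATE callback with migrate_pgmap_owner,
 * letting the owning driver's notifier skip the redundant invalidation.
 */
static int example_migrate_to_vram(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   unsigned long *src, unsigned long *dst,
				   void *owner)
{
	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
		.end		= end,
		.src		= src,
		.dst		= dst,
		.pgmap_owner	= owner,	/* matched in the notifier */
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	int ret;

	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;
	/* allocate device pages, fill dst[], and copy the data here */
	migrate_vma_pages(&args);
	/* wait for the copy to finish before finalizing */
	migrate_vma_finalize(&args);
	return 0;
}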
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -93,17 +93,6 @@ nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
 	return NULL;
 }
 
-struct nouveau_svmm {
-	struct mmu_notifier notifier;
-	struct nouveau_vmm *vmm;
-	struct {
-		unsigned long start;
-		unsigned long limit;
-	} unmanaged;
-
-	struct mutex mutex;
-};
-
 #define SVMM_DBG(s,f,a...) \
 	NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
 #define SVMM_ERR(s,f,a...) \
@@ -246,7 +235,7 @@ nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
 }
 
 /* Invalidate SVMM address-range on GPU. */
-static void
+void
 nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
 {
 	if (limit > start) {
@@ -279,6 +268,14 @@ nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
 	if (unlikely(!svmm->vmm))
 		goto out;
 
+	/*
+	 * Ignore invalidation callbacks for device private pages since
+	 * the invalidation is handled as part of the migration process.
+	 */
+	if (update->event == MMU_NOTIFY_MIGRATE &&
+	    update->migrate_pgmap_owner == svmm->vmm->cli->drm->dev)
+		goto out;
+
 	if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
 		if (start < svmm->unmanaged.start) {
 			nouveau_svmm_invalidate(svmm, start,
--- a/drivers/gpu/drm/nouveau/nouveau_svm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.h
@@ -1,11 +1,21 @@
 #ifndef __NOUVEAU_SVM_H__
 #define __NOUVEAU_SVM_H__
 #include <nvif/os.h>
+#include <linux/mmu_notifier.h>
 struct drm_device;
 struct drm_file;
 struct nouveau_drm;
 
-struct nouveau_svmm;
+struct nouveau_svmm {
+	struct mmu_notifier notifier;
+	struct nouveau_vmm *vmm;
+	struct {
+		unsigned long start;
+		unsigned long limit;
+	} unmanaged;
+
+	struct mutex mutex;
+};
 
 #if IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM)
 void nouveau_svm_init(struct nouveau_drm *);
@@ -19,6 +29,7 @@ int nouveau_svmm_join(struct nouveau_svmm *, u64 inst);
 void nouveau_svmm_part(struct nouveau_svmm *, u64 inst);
 int nouveau_svmm_bind(struct drm_device *, void *, struct drm_file *);
 
+void nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit);
 u64 *nouveau_pfns_alloc(unsigned long npages);
 void nouveau_pfns_free(u64 *pfns);
 void nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
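The header now exports nouveau_svmm_invalidate() (and moves struct nouveau_svmm out of nouveau_svm.c) so that nouveau_dmem.c can take the svmm mutex and invalidate directly. A stripped-down sketch of the ordering the patch establishes in the fault path (not the driver's exact code; copy_one_to_host() stands in for drm->dmem->migrate.copy_func()):

/*
 * Sketch: hold svmm->mutex across both the GPU invalidation and the
 * copy-out, so the GPU cannot re-fault the stale device private page
 * while its contents move back to system memory. The svmm pointer was
 * stashed in page->zone_device_data when the page migrated to VRAM.
 */
static vm_fault_t example_fault_copy(struct page *spage, u64 start, u64 end,
				     int (*copy_one_to_host)(struct page *))
{
	struct nouveau_svmm *svmm = spage->zone_device_data;

	mutex_lock(&svmm->mutex);
	nouveau_svmm_invalidate(svmm, start, end);	/* GPU unmap first */
	if (copy_one_to_host(spage)) {
		mutex_unlock(&svmm->mutex);		/* error path unlocks too */
		return VM_FAULT_SIGBUS;
	}
	mutex_unlock(&svmm->mutex);
	return 0;
}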