nouveau: factor out device memory address calculation
Factor out the repeated device memory address calculation into a helper.

Link: https://lore.kernel.org/r/20190814075928.23766-4-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Tested-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent dea027f282
commit 64de8b8d65
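Every caller needed the same three steps to turn a device-private page into a VRAM address: find the page's index within its nouveau_dmem_chunk, scale it to bytes, and add the chunk's buffer-object offset. The patch collects these into a single helper; a sketch of it is below, body verbatim from the diff, with an explanatory comment added here for illustration:

	/*
	 * Comment added for illustration: a device-private page lives in a
	 * nouveau_dmem_chunk backed by a contiguous VRAM buffer object, so
	 * its address is its page index within the chunk, in bytes, plus
	 * the chunk's base offset.
	 */
	static unsigned long nouveau_dmem_page_addr(struct page *page)
	{
		struct nouveau_dmem_chunk *chunk = page->zone_device_data;
		unsigned long idx = page_to_pfn(page) - chunk->pfn_first;

		return (idx << PAGE_SHIFT) + chunk->bo->bo.offset;
	}

Call sites then pass nouveau_dmem_page_addr(spage) or nouveau_dmem_page_addr(dpage) directly to the copy function, dropping the local src_addr/dst_addr temporaries and the per-call chunk lookups.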
@@ -102,6 +102,14 @@ struct nouveau_migrate {
 	unsigned long dma_nr;
 };
 
+static unsigned long nouveau_dmem_page_addr(struct page *page)
+{
+	struct nouveau_dmem_chunk *chunk = page->zone_device_data;
+	unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
+
+	return (idx << PAGE_SHIFT) + chunk->bo->bo.offset;
+}
+
 static void nouveau_dmem_page_free(struct page *page)
 {
 	struct nouveau_dmem_chunk *chunk = page->zone_device_data;
@@ -169,9 +177,7 @@ nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
 	/* Copy things over */
 	copy = drm->dmem->migrate.copy_func;
 	for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
-		struct nouveau_dmem_chunk *chunk;
 		struct page *spage, *dpage;
-		u64 src_addr, dst_addr;
 
 		dpage = migrate_pfn_to_page(dst_pfns[i]);
 		if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
@@ -194,14 +200,10 @@ nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
 			continue;
 		}
 
-		dst_addr = fault->dma[fault->npages++];
-
-		chunk = spage->zone_device_data;
-		src_addr = page_to_pfn(spage) - chunk->pfn_first;
-		src_addr = (src_addr << PAGE_SHIFT) + chunk->bo->bo.offset;
-
-		ret = copy(drm, 1, NOUVEAU_APER_HOST, dst_addr,
-			   NOUVEAU_APER_VRAM, src_addr);
+		ret = copy(drm, 1, NOUVEAU_APER_HOST,
+			   fault->dma[fault->npages++],
+			   NOUVEAU_APER_VRAM,
+			   nouveau_dmem_page_addr(spage));
 		if (ret) {
 			dst_pfns[i] = MIGRATE_PFN_ERROR;
 			__free_page(dpage);
@@ -687,18 +689,12 @@ nouveau_dmem_migrate_alloc_and_copy(struct vm_area_struct *vma,
 	/* Copy things over */
 	copy = drm->dmem->migrate.copy_func;
 	for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
-		struct nouveau_dmem_chunk *chunk;
 		struct page *spage, *dpage;
-		u64 src_addr, dst_addr;
 
 		dpage = migrate_pfn_to_page(dst_pfns[i]);
 		if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
 			continue;
 
-		chunk = dpage->zone_device_data;
-		dst_addr = page_to_pfn(dpage) - chunk->pfn_first;
-		dst_addr = (dst_addr << PAGE_SHIFT) + chunk->bo->bo.offset;
-
 		spage = migrate_pfn_to_page(src_pfns[i]);
 		if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
 			nouveau_dmem_page_free_locked(drm, dpage);
@@ -716,10 +712,10 @@ nouveau_dmem_migrate_alloc_and_copy(struct vm_area_struct *vma,
 			continue;
 		}
 
-		src_addr = migrate->dma[migrate->dma_nr++];
-
-		ret = copy(drm, 1, NOUVEAU_APER_VRAM, dst_addr,
-			   NOUVEAU_APER_HOST, src_addr);
+		ret = copy(drm, 1, NOUVEAU_APER_VRAM,
+			   nouveau_dmem_page_addr(dpage),
+			   NOUVEAU_APER_HOST,
+			   migrate->dma[migrate->dma_nr++]);
 		if (ret) {
 			nouveau_dmem_page_free_locked(drm, dpage);
 			dst_pfns[i] = 0;
@@ -846,7 +842,6 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
 
 	npages = (range->end - range->start) >> PAGE_SHIFT;
 	for (i = 0; i < npages; ++i) {
-		struct nouveau_dmem_chunk *chunk;
 		struct page *page;
 		uint64_t addr;
 
@@ -864,10 +859,7 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
 			continue;
 		}
 
-		chunk = page->zone_device_data;
-		addr = page_to_pfn(page) - chunk->pfn_first;
-		addr = (addr + chunk->bo->bo.mem.start) << PAGE_SHIFT;
-
+		addr = nouveau_dmem_page_addr(page);
 		range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
 		range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
 	}