drm/nouveau: wrap nvkm_mem objects in nvkm_memory interfaces

This is a transition step to enable finer-grained commits while
transitioning to new MMU interfaces.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
commit bd275f1d1a
parent bd447053b3
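The pattern this commit applies in each file below: the legacy nvkm_mem
object gains an embedded struct nvkm_memory handle, and a small
nvkm_memory_func vtable answers the new interface's queries (target,
page, size) by recovering the containing object with container_of() and
delegating to the legacy fields. The following is a minimal user-space
sketch of that wrapping technique, not nouveau's code; every name in it
(legacy_mem, memory_func, describe, and so on) is invented for
illustration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* container_of(), as in the kernel */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

enum mem_target { MEM_TARGET_HOST, MEM_TARGET_VRAM };

struct memory;

struct memory_func {		/* new-style interface: a vtable */
	enum mem_target (*target)(struct memory *);
	uint64_t (*size)(struct memory *);
};

struct memory {			/* new-style handle, embedded below */
	const struct memory_func *func;
};

struct legacy_mem {		/* stands in for struct nvkm_mem */
	int is_vram;
	uint64_t pages;		/* size in 4KiB units */
	struct memory memory;	/* the wrapper added by this commit */
};

/* Callbacks recover the legacy object and answer from its fields. */
static enum mem_target
legacy_mem_target(struct memory *memory)
{
	struct legacy_mem *mem = container_of(memory, struct legacy_mem, memory);
	return mem->is_vram ? MEM_TARGET_VRAM : MEM_TARGET_HOST;
}

static uint64_t
legacy_mem_size(struct memory *memory)
{
	struct legacy_mem *mem = container_of(memory, struct legacy_mem, memory);
	return mem->pages << 12;
}

static const struct memory_func legacy_mem_memory = {
	.target = legacy_mem_target,
	.size   = legacy_mem_size,
};

/* A new-interface consumer: it never sees struct legacy_mem. */
static void
describe(struct memory *memory)
{
	printf("%s, %llu bytes\n",
	       memory->func->target(memory) == MEM_TARGET_VRAM ? "VRAM" : "HOST",
	       (unsigned long long)memory->func->size(memory));
}

int main(void)
{
	struct legacy_mem mem = { .is_vram = 1, .pages = 16 };
	mem.memory.func = &legacy_mem_memory;	/* the "ctor" step */
	describe(&mem.memory);			/* prints "VRAM, 65536 bytes" */
	return 0;
}

The hunks below have exactly this shape: nouveau_mem.c gains a
nouveau_mem_memory function table, and each allocation path points the
legacy object's new memory field at the embedded wrapper.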
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -1,6 +1,7 @@
 #ifndef __NVKM_FB_H__
 #define __NVKM_FB_H__
 #include <core/subdev.h>
+#include <core/memory.h>
 
 #include <subdev/mmu.h>
 
@@ -29,6 +30,8 @@ struct nvkm_mem {
 	u64 offset;
 	u64 size;
 	struct sg_table *sg;
+
+	struct nvkm_memory *memory;
 };
 
 struct nvkm_fb_tile {
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -66,6 +66,7 @@ nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
 	else mem->__mem.pages = tt->dma_address;
 	mem->_mem = &mem->__mem;
 	mem->mem.page = 12;
+	mem->_mem->memory = &mem->memory;
 	return 0;
 }
 
@@ -78,6 +79,7 @@ nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
 	int ret;
 
 	mem->mem.page = page;
+	mem->_mem->memory = &mem->memory;
 
 	ret = ram->func->get(ram, size, 1 << page, contig ? 0 : 1 << page,
 			     (mem->comp << 8) | mem->kind, &mem->_mem);
@@ -97,6 +99,36 @@ nouveau_mem_del(struct ttm_mem_reg *reg)
 	reg->mm_node = NULL;
 }
 
+static enum nvkm_memory_target
+nouveau_mem_memory_target(struct nvkm_memory *memory)
+{
+	struct nouveau_mem *mem = container_of(memory, typeof(*mem), memory);
+	if (mem->_mem->mem)
+		return NVKM_MEM_TARGET_VRAM;
+	return NVKM_MEM_TARGET_HOST;
+};
+
+static u8
+nouveau_mem_memory_page(struct nvkm_memory *memory)
+{
+	struct nouveau_mem *mem = container_of(memory, typeof(*mem), memory);
+	return mem->mem.page;
+};
+
+static u64
+nouveau_mem_memory_size(struct nvkm_memory *memory)
+{
+	struct nouveau_mem *mem = container_of(memory, typeof(*mem), memory);
+	return mem->_mem->size << 12;
+}
+
+static const struct nvkm_memory_func
+nouveau_mem_memory = {
+	.target = nouveau_mem_memory_target,
+	.page = nouveau_mem_memory_page,
+	.size = nouveau_mem_memory_size,
+};
+
 int
 nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
 		struct ttm_mem_reg *reg)
@@ -108,6 +140,7 @@ nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
 	mem->cli = cli;
 	mem->kind = kind;
 	mem->comp = comp;
+	nvkm_memory_ctor(&nouveau_mem_memory, &mem->memory);
 
 	reg->mm_node = mem;
 	return 0;
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -23,6 +23,13 @@ struct nouveau_mem {
 	struct nvkm_mem __mem;
 	struct nvkm_mem *_mem;
 	struct nvkm_vma bar_vma;
+
+	struct nvkm_memory memory;
+};
+
+enum nvif_vmm_get {
+	PTES,
+	LAZY,
 };
 
 int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
@@ -41,6 +41,7 @@ nvkm_vram_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
 	struct nvkm_vram *vram = nvkm_vram(memory);
 	struct nvkm_mem mem = {
 		.mem = vram->mn,
+		.memory = &vram->memory,
 	};
 	nvkm_vm_map_at(vma, offset, &mem);
 	return 0;
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -119,6 +119,12 @@ gk20a_instobj_target(struct nvkm_memory *memory)
 	return NVKM_MEM_TARGET_NCOH;
 }
 
+static u8
+gk20a_instobj_page(struct nvkm_memory *memory)
+{
+	return 12;
+}
+
 static u64
 gk20a_instobj_addr(struct nvkm_memory *memory)
 {
@@ -343,6 +349,7 @@ static const struct nvkm_memory_func
 gk20a_instobj_func_dma = {
 	.dtor = gk20a_instobj_dtor_dma,
 	.target = gk20a_instobj_target,
+	.page = gk20a_instobj_page,
 	.addr = gk20a_instobj_addr,
 	.size = gk20a_instobj_size,
 	.acquire = gk20a_instobj_acquire_dma,
@@ -354,6 +361,7 @@ static const struct nvkm_memory_func
 gk20a_instobj_func_iommu = {
 	.dtor = gk20a_instobj_dtor_iommu,
 	.target = gk20a_instobj_target,
+	.page = gk20a_instobj_page,
 	.addr = gk20a_instobj_addr,
 	.size = gk20a_instobj_size,
 	.acquire = gk20a_instobj_acquire_iommu,
@@ -531,6 +539,7 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 	/* present memory for being mapped using small pages */
 	node->mem.size = size >> 12;
 	node->mem.memtype = 0;
+	node->mem.memory = &node->memory;
 
 	nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
 		   size, align, node->mem.offset);