drm/nouveau: separate constant-va tracking from nvkm vma structure
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
This commit is contained in:
parent
9ce523cc3b
commit
24e8375b1b
@ -34,6 +34,7 @@ nouveau-y += nouveau_mem.o
|
||||
nouveau-y += nouveau_prime.o
|
||||
nouveau-y += nouveau_sgdma.o
|
||||
nouveau-y += nouveau_ttm.o
|
||||
nouveau-y += nouveau_vmm.o
|
||||
|
||||
# DRM - modesetting
|
||||
nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
|
||||
|
@ -16,8 +16,6 @@ struct nvkm_vm_pgd {
|
||||
};
|
||||
|
||||
struct nvkm_vma {
|
||||
struct list_head head;
|
||||
int refcount;
|
||||
struct nvkm_vm *vm;
|
||||
struct nvkm_mm_node *node;
|
||||
union {
|
||||
|
@ -34,6 +34,7 @@
|
||||
#include "nouveau_gem.h"
|
||||
#include "nouveau_chan.h"
|
||||
#include "nouveau_abi16.h"
|
||||
#include "nouveau_vmm.h"
|
||||
|
||||
static struct nouveau_abi16 *
|
||||
nouveau_abi16(struct drm_file *file_priv)
|
||||
@ -134,7 +135,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
|
||||
}
|
||||
|
||||
if (chan->ntfy) {
|
||||
nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
|
||||
nouveau_vma_del(&chan->ntfy_vma);
|
||||
nouveau_bo_unpin(chan->ntfy);
|
||||
drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
|
||||
}
|
||||
@ -329,8 +330,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
|
||||
goto done;
|
||||
|
||||
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
|
||||
ret = nouveau_bo_vma_add(chan->ntfy, cli->vm,
|
||||
&chan->ntfy_vma);
|
||||
ret = nouveau_vma_new(chan->ntfy, &cli->vmm, &chan->ntfy_vma);
|
||||
if (ret)
|
||||
goto done;
|
||||
}
|
||||
@ -548,8 +548,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
|
||||
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
|
||||
args.target = NV_DMA_V0_TARGET_VM;
|
||||
args.access = NV_DMA_V0_ACCESS_VM;
|
||||
args.start += chan->ntfy_vma.offset;
|
||||
args.limit += chan->ntfy_vma.offset;
|
||||
args.start += chan->ntfy_vma->addr;
|
||||
args.limit += chan->ntfy_vma->addr;
|
||||
} else
|
||||
if (drm->agp.bridge) {
|
||||
args.target = NV_DMA_V0_TARGET_AGP;
|
||||
|
@ -23,7 +23,7 @@ struct nouveau_abi16_chan {
|
||||
struct nouveau_channel *chan;
|
||||
struct list_head notifiers;
|
||||
struct nouveau_bo *ntfy;
|
||||
struct nvkm_vma ntfy_vma;
|
||||
struct nouveau_vma *ntfy_vma;
|
||||
struct nvkm_mm heap;
|
||||
};
|
||||
|
||||
|
@ -38,6 +38,7 @@
|
||||
#include "nouveau_ttm.h"
|
||||
#include "nouveau_gem.h"
|
||||
#include "nouveau_mem.h"
|
||||
#include "nouveau_vmm.h"
|
||||
|
||||
/*
|
||||
* NV10-NV40 tiling helpers
|
||||
@ -1223,7 +1224,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
|
||||
{
|
||||
struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
|
||||
struct nouveau_bo *nvbo = nouveau_bo(bo);
|
||||
struct nvkm_vma *vma;
|
||||
struct nouveau_vma *vma;
|
||||
|
||||
/* ttm can now (stupidly) pass the driver bos it didn't create... */
|
||||
if (bo->destroy != nouveau_bo_del_ttm)
|
||||
@ -1232,12 +1233,12 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
|
||||
if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
|
||||
mem->mem.page == nvbo->page) {
|
||||
list_for_each_entry(vma, &nvbo->vma_list, head) {
|
||||
nvkm_vm_map(vma, mem->_mem);
|
||||
nouveau_vma_map(vma, mem);
|
||||
}
|
||||
} else {
|
||||
list_for_each_entry(vma, &nvbo->vma_list, head) {
|
||||
WARN_ON(ttm_bo_wait(bo, false, false));
|
||||
nvkm_vm_unmap(vma);
|
||||
nouveau_vma_unmap(vma);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1599,47 +1600,3 @@ struct ttm_bo_driver nouveau_bo_driver = {
|
||||
.io_mem_free = &nouveau_ttm_io_mem_free,
|
||||
.io_mem_pfn = ttm_bo_default_io_mem_pfn,
|
||||
};
|
||||
|
||||
struct nvkm_vma *
|
||||
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
|
||||
{
|
||||
struct nvkm_vma *vma;
|
||||
list_for_each_entry(vma, &nvbo->vma_list, head) {
|
||||
if (vma->vm == vm)
|
||||
return vma;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int
|
||||
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
|
||||
struct nvkm_vma *vma)
|
||||
{
|
||||
const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
|
||||
struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
|
||||
int ret;
|
||||
|
||||
ret = nvkm_vm_get(vm, size, nvbo->page, NV_MEM_ACCESS_RW, vma);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
|
||||
mem->mem.page == nvbo->page)
|
||||
nvkm_vm_map(vma, mem->_mem);
|
||||
|
||||
list_add_tail(&vma->head, &nvbo->vma_list);
|
||||
vma->refcount = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
|
||||
{
|
||||
if (vma->node) {
|
||||
if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
|
||||
nvkm_vm_unmap(vma);
|
||||
nvkm_vm_put(vma);
|
||||
list_del(&vma->head);
|
||||
}
|
||||
}
|
||||
|
@ -93,13 +93,6 @@ int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
|
||||
void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
|
||||
void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
|
||||
|
||||
struct nvkm_vma *
|
||||
nouveau_bo_vma_find(struct nouveau_bo *, struct nvkm_vm *);
|
||||
|
||||
int nouveau_bo_vma_add(struct nouveau_bo *, struct nvkm_vm *,
|
||||
struct nvkm_vma *);
|
||||
void nouveau_bo_vma_del(struct nouveau_bo *, struct nvkm_vma *);
|
||||
|
||||
/* TODO: submit equivalent to TTM generic API upstream? */
|
||||
static inline void __iomem *
|
||||
nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
|
||||
|
@ -40,6 +40,7 @@
|
||||
#include "nouveau_chan.h"
|
||||
#include "nouveau_fence.h"
|
||||
#include "nouveau_abi16.h"
|
||||
#include "nouveau_vmm.h"
|
||||
|
||||
MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
|
||||
int nouveau_vram_pushbuf;
|
||||
@ -91,7 +92,7 @@ nouveau_channel_del(struct nouveau_channel **pchan)
|
||||
nvif_notify_fini(&chan->kill);
|
||||
nvif_object_fini(&chan->user);
|
||||
nvif_object_fini(&chan->push.ctxdma);
|
||||
nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
|
||||
nouveau_vma_del(&chan->push.vma);
|
||||
nouveau_bo_unmap(chan->push.buffer);
|
||||
if (chan->push.buffer && chan->push.buffer->pin_refcnt)
|
||||
nouveau_bo_unpin(chan->push.buffer);
|
||||
@ -142,11 +143,11 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
|
||||
* pushbuf lives in, this is because the GEM code requires that
|
||||
* we be able to call out to other (indirect) push buffers
|
||||
*/
|
||||
chan->push.vma.offset = chan->push.buffer->bo.offset;
|
||||
chan->push.addr = chan->push.buffer->bo.offset;
|
||||
|
||||
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
|
||||
ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm,
|
||||
&chan->push.vma);
|
||||
ret = nouveau_vma_new(chan->push.buffer, &cli->vmm,
|
||||
&chan->push.vma);
|
||||
if (ret) {
|
||||
nouveau_channel_del(pchan);
|
||||
return ret;
|
||||
@ -156,6 +157,8 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
|
||||
args.access = NV_DMA_V0_ACCESS_VM;
|
||||
args.start = 0;
|
||||
args.limit = cli->vm->mmu->limit - 1;
|
||||
|
||||
chan->push.addr = chan->push.vma->addr;
|
||||
} else
|
||||
if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
|
||||
if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
|
||||
@ -233,20 +236,20 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
|
||||
args.kepler.version = 0;
|
||||
args.kepler.engines = engine;
|
||||
args.kepler.ilength = 0x02000;
|
||||
args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
|
||||
args.kepler.ioffset = 0x10000 + chan->push.addr;
|
||||
args.kepler.vm = 0;
|
||||
size = sizeof(args.kepler);
|
||||
} else
|
||||
if (oclass[0] >= FERMI_CHANNEL_GPFIFO) {
|
||||
args.fermi.version = 0;
|
||||
args.fermi.ilength = 0x02000;
|
||||
args.fermi.ioffset = 0x10000 + chan->push.vma.offset;
|
||||
args.fermi.ioffset = 0x10000 + chan->push.addr;
|
||||
args.fermi.vm = 0;
|
||||
size = sizeof(args.fermi);
|
||||
} else {
|
||||
args.nv50.version = 0;
|
||||
args.nv50.ilength = 0x02000;
|
||||
args.nv50.ioffset = 0x10000 + chan->push.vma.offset;
|
||||
args.nv50.ioffset = 0x10000 + chan->push.addr;
|
||||
args.nv50.pushbuf = nvif_handle(&chan->push.ctxdma);
|
||||
args.nv50.vm = 0;
|
||||
size = sizeof(args.nv50);
|
||||
@ -293,7 +296,7 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
|
||||
/* create channel object */
|
||||
args.version = 0;
|
||||
args.pushbuf = nvif_handle(&chan->push.ctxdma);
|
||||
args.offset = chan->push.vma.offset;
|
||||
args.offset = chan->push.addr;
|
||||
|
||||
do {
|
||||
ret = nvif_object_init(&device->object, 0, *oclass++,
|
||||
|
@ -16,8 +16,9 @@ struct nouveau_channel {
|
||||
|
||||
struct {
|
||||
struct nouveau_bo *buffer;
|
||||
struct nvkm_vma vma;
|
||||
struct nouveau_vma *vma;
|
||||
struct nvif_object ctxdma;
|
||||
u64 addr;
|
||||
} push;
|
||||
|
||||
/* TODO: this will be reworked in the near future */
|
||||
|
@ -1,14 +1,11 @@
|
||||
#ifndef __NOUVEAU_DISPLAY_H__
|
||||
#define __NOUVEAU_DISPLAY_H__
|
||||
|
||||
#include <subdev/mmu.h>
|
||||
|
||||
#include "nouveau_drv.h"
|
||||
|
||||
struct nouveau_framebuffer {
|
||||
struct drm_framebuffer base;
|
||||
struct nouveau_bo *nvbo;
|
||||
struct nvkm_vma vma;
|
||||
struct nouveau_vma *vma;
|
||||
u32 r_handle;
|
||||
u32 r_format;
|
||||
u32 r_pitch;
|
||||
|
@ -26,6 +26,7 @@
|
||||
|
||||
#include "nouveau_drv.h"
|
||||
#include "nouveau_dma.h"
|
||||
#include "nouveau_vmm.h"
|
||||
|
||||
void
|
||||
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
|
||||
@ -71,11 +72,11 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (val < chan->push.vma.offset ||
|
||||
val > chan->push.vma.offset + (chan->dma.max << 2))
|
||||
if (val < chan->push.addr ||
|
||||
val > chan->push.addr + (chan->dma.max << 2))
|
||||
return -EINVAL;
|
||||
|
||||
return (val - chan->push.vma.offset) >> 2;
|
||||
return (val - chan->push.addr) >> 2;
|
||||
}
|
||||
|
||||
void
|
||||
@ -84,13 +85,13 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
|
||||
{
|
||||
struct nouveau_cli *cli = (void *)chan->user.client;
|
||||
struct nouveau_bo *pb = chan->push.buffer;
|
||||
struct nvkm_vma *vma;
|
||||
struct nouveau_vma *vma;
|
||||
int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
|
||||
u64 offset;
|
||||
|
||||
vma = nouveau_bo_vma_find(bo, cli->vm);
|
||||
vma = nouveau_vma_find(bo, &cli->vmm);
|
||||
BUG_ON(!vma);
|
||||
offset = vma->offset + delta;
|
||||
offset = vma->addr + delta;
|
||||
|
||||
BUG_ON(chan->dma.ib_free < 1);
|
||||
|
||||
@ -224,7 +225,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
|
||||
* instruct the GPU to jump back to the start right
|
||||
* after processing the currently pending commands.
|
||||
*/
|
||||
OUT_RING(chan, chan->push.vma.offset | 0x20000000);
|
||||
OUT_RING(chan, chan->push.addr | 0x20000000);
|
||||
|
||||
/* wait for GET to depart from the skips area.
|
||||
* prevents writing GET==PUT and causing a race
|
||||
|
@ -140,7 +140,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
|
||||
#define WRITE_PUT(val) do { \
|
||||
mb(); \
|
||||
nouveau_bo_rd32(chan->push.buffer, 0); \
|
||||
nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
|
||||
nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.addr);\
|
||||
} while (0)
|
||||
|
||||
static inline void
|
||||
|
@ -114,8 +114,8 @@ nouveau_name(struct drm_device *dev)
|
||||
static void
|
||||
nouveau_cli_fini(struct nouveau_cli *cli)
|
||||
{
|
||||
nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
|
||||
usif_client_fini(cli);
|
||||
nouveau_vmm_fini(&cli->vmm);
|
||||
nvif_device_fini(&cli->device);
|
||||
mutex_lock(&cli->drm->master.lock);
|
||||
nvif_client_fini(&cli->base);
|
||||
@ -472,12 +472,11 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
|
||||
goto fail_device;
|
||||
}
|
||||
|
||||
ret = nvkm_vm_new(nvxx_device(&drm->client.device),
|
||||
0, (1ULL << 40), 0x1000, NULL,
|
||||
&drm->client.vm);
|
||||
ret = nouveau_vmm_init(&drm->client, 0, &drm->client.vmm);
|
||||
if (ret)
|
||||
goto fail_device;
|
||||
|
||||
drm->client.vm = drm->client.vmm.vm;
|
||||
nvxx_client(&drm->client.base)->vm = drm->client.vm;
|
||||
}
|
||||
|
||||
@ -863,11 +862,11 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
|
||||
cli->base.super = false;
|
||||
|
||||
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
|
||||
ret = nvkm_vm_new(nvxx_device(&drm->client.device), 0,
|
||||
(1ULL << 40), 0x1000, NULL, &cli->vm);
|
||||
ret = nouveau_vmm_init(cli, 0, &cli->vmm);
|
||||
if (ret)
|
||||
goto done;
|
||||
|
||||
cli->vm = cli->vmm.vm;
|
||||
nvxx_client(&cli->base)->vm = cli->vm;
|
||||
}
|
||||
|
||||
|
@ -61,6 +61,7 @@ struct platform_device;
|
||||
|
||||
#include "nouveau_fence.h"
|
||||
#include "nouveau_bios.h"
|
||||
#include "nouveau_vmm.h"
|
||||
|
||||
struct nouveau_drm_tile {
|
||||
struct nouveau_fence *fence;
|
||||
@ -90,8 +91,9 @@ struct nouveau_cli {
|
||||
struct mutex mutex;
|
||||
|
||||
struct nvif_device device;
|
||||
struct nouveau_vmm vmm;
|
||||
|
||||
struct nvkm_vm *vm; /*XXX*/
|
||||
struct nvkm_vm *vm;
|
||||
struct list_head head;
|
||||
void *abi16;
|
||||
struct list_head objects;
|
||||
|
@ -48,6 +48,7 @@
|
||||
#include "nouveau_bo.h"
|
||||
#include "nouveau_fbcon.h"
|
||||
#include "nouveau_chan.h"
|
||||
#include "nouveau_vmm.h"
|
||||
|
||||
#include "nouveau_crtc.h"
|
||||
|
||||
@ -348,7 +349,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
|
||||
|
||||
chan = nouveau_nofbaccel ? NULL : drm->channel;
|
||||
if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
|
||||
ret = nouveau_bo_vma_add(nvbo, drm->client.vm, &fb->vma);
|
||||
ret = nouveau_vma_new(nvbo, &drm->client.vmm, &fb->vma);
|
||||
if (ret) {
|
||||
NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
|
||||
chan = NULL;
|
||||
@ -402,7 +403,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
|
||||
|
||||
out_unlock:
|
||||
if (chan)
|
||||
nouveau_bo_vma_del(fb->nvbo, &fb->vma);
|
||||
nouveau_vma_del(&fb->vma);
|
||||
nouveau_bo_unmap(fb->nvbo);
|
||||
out_unpin:
|
||||
nouveau_bo_unpin(fb->nvbo);
|
||||
@ -429,7 +430,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
|
||||
drm_fb_helper_fini(&fbcon->helper);
|
||||
|
||||
if (nouveau_fb->nvbo) {
|
||||
nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
|
||||
nouveau_vma_del(&nouveau_fb->vma);
|
||||
nouveau_bo_unmap(nouveau_fb->nvbo);
|
||||
nouveau_bo_unpin(nouveau_fb->nvbo);
|
||||
drm_framebuffer_unreference(&nouveau_fb->base);
|
||||
|
@ -88,7 +88,7 @@ int nouveau_flip_complete(struct nvif_notify *);
|
||||
|
||||
struct nv84_fence_chan {
|
||||
struct nouveau_fence_chan base;
|
||||
struct nvkm_vma vma;
|
||||
struct nouveau_vma *vma;
|
||||
};
|
||||
|
||||
struct nv84_fence_priv {
|
||||
|
@ -31,6 +31,7 @@
|
||||
|
||||
#include "nouveau_ttm.h"
|
||||
#include "nouveau_gem.h"
|
||||
#include "nouveau_vmm.h"
|
||||
|
||||
void
|
||||
nouveau_gem_object_del(struct drm_gem_object *gem)
|
||||
@ -64,8 +65,8 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
|
||||
struct nouveau_cli *cli = nouveau_cli(file_priv);
|
||||
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
|
||||
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
|
||||
struct nvkm_vma *vma;
|
||||
struct device *dev = drm->dev->dev;
|
||||
struct nouveau_vma *vma;
|
||||
int ret;
|
||||
|
||||
if (!cli->vm)
|
||||
@ -75,30 +76,13 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
vma = nouveau_bo_vma_find(nvbo, cli->vm);
|
||||
if (!vma) {
|
||||
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
|
||||
if (!vma) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = pm_runtime_get_sync(dev);
|
||||
if (ret < 0 && ret != -EACCES) {
|
||||
kfree(vma);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
|
||||
if (ret)
|
||||
kfree(vma);
|
||||
|
||||
pm_runtime_mark_last_busy(dev);
|
||||
pm_runtime_put_autosuspend(dev);
|
||||
} else {
|
||||
vma->refcount++;
|
||||
}
|
||||
ret = pm_runtime_get_sync(dev);
|
||||
if (ret < 0 && ret != -EACCES)
|
||||
goto out;
|
||||
|
||||
ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
|
||||
pm_runtime_mark_last_busy(dev);
|
||||
pm_runtime_put_autosuspend(dev);
|
||||
out:
|
||||
ttm_bo_unreserve(&nvbo->bo);
|
||||
return ret;
|
||||
@ -107,14 +91,12 @@ out:
|
||||
static void
|
||||
nouveau_gem_object_delete(void *data)
|
||||
{
|
||||
struct nvkm_vma *vma = data;
|
||||
nvkm_vm_unmap(vma);
|
||||
nvkm_vm_put(vma);
|
||||
kfree(vma);
|
||||
struct nouveau_vma *vma = data;
|
||||
nouveau_vma_del(&vma);
|
||||
}
|
||||
|
||||
static void
|
||||
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
|
||||
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
|
||||
{
|
||||
const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
|
||||
struct reservation_object *resv = nvbo->bo.resv;
|
||||
@ -123,7 +105,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
|
||||
|
||||
fobj = reservation_object_get_list(resv);
|
||||
|
||||
list_del(&vma->head);
|
||||
list_del_init(&vma->head);
|
||||
|
||||
if (fobj && fobj->shared_count > 1)
|
||||
ttm_bo_wait(&nvbo->bo, false, false);
|
||||
@ -133,14 +115,10 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
|
||||
else
|
||||
fence = reservation_object_get_excl(nvbo->bo.resv);
|
||||
|
||||
if (fence && mapped) {
|
||||
if (fence && mapped)
|
||||
nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
|
||||
} else {
|
||||
if (mapped)
|
||||
nvkm_vm_unmap(vma);
|
||||
nvkm_vm_put(vma);
|
||||
kfree(vma);
|
||||
}
|
||||
else
|
||||
nouveau_vma_del(&vma);
|
||||
}
|
||||
|
||||
void
|
||||
@ -150,7 +128,7 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
|
||||
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
|
||||
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
|
||||
struct device *dev = drm->dev->dev;
|
||||
struct nvkm_vma *vma;
|
||||
struct nouveau_vma *vma;
|
||||
int ret;
|
||||
|
||||
if (!cli->vm)
|
||||
@ -160,9 +138,9 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
vma = nouveau_bo_vma_find(nvbo, cli->vm);
|
||||
vma = nouveau_vma_find(nvbo, &cli->vmm);
|
||||
if (vma) {
|
||||
if (--vma->refcount == 0) {
|
||||
if (--vma->refs == 0) {
|
||||
ret = pm_runtime_get_sync(dev);
|
||||
if (!WARN_ON(ret < 0 && ret != -EACCES)) {
|
||||
nouveau_gem_object_unmap(nvbo, vma);
|
||||
@ -227,7 +205,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
|
||||
{
|
||||
struct nouveau_cli *cli = nouveau_cli(file_priv);
|
||||
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
|
||||
struct nvkm_vma *vma;
|
||||
struct nouveau_vma *vma;
|
||||
|
||||
if (is_power_of_2(nvbo->valid_domains))
|
||||
rep->domain = nvbo->valid_domains;
|
||||
@ -237,11 +215,11 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
|
||||
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
|
||||
rep->offset = nvbo->bo.offset;
|
||||
if (cli->vm) {
|
||||
vma = nouveau_bo_vma_find(nvbo, cli->vm);
|
||||
vma = nouveau_vma_find(nvbo, &cli->vmm);
|
||||
if (!vma)
|
||||
return -EINVAL;
|
||||
|
||||
rep->offset = vma->offset;
|
||||
rep->offset = vma->addr;
|
||||
}
|
||||
|
||||
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
|
||||
@ -798,7 +776,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
|
||||
bo[push[i].bo_index].user_priv;
|
||||
uint32_t cmd;
|
||||
|
||||
cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
|
||||
cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
|
||||
cmd |= 0x20000000;
|
||||
if (unlikely(cmd != req->suffix0)) {
|
||||
if (!nvbo->kmap.virtual) {
|
||||
@ -850,7 +828,7 @@ out_next:
|
||||
req->suffix1 = 0x00000000;
|
||||
} else {
|
||||
req->suffix0 = 0x20000000 |
|
||||
(chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
|
||||
(chan->push.addr + ((chan->dma.cur + 2) << 2));
|
||||
req->suffix1 = 0x00000000;
|
||||
}
|
||||
|
||||
|
128
drivers/gpu/drm/nouveau/nouveau_vmm.c
Normal file
128
drivers/gpu/drm/nouveau/nouveau_vmm.c
Normal file
@ -0,0 +1,128 @@
|
||||
/*
|
||||
* Copyright 2017 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
#include "nouveau_vmm.h"
|
||||
#include "nouveau_drv.h"
|
||||
#include "nouveau_bo.h"
|
||||
#include "nouveau_mem.h"
|
||||
|
||||
void
|
||||
nouveau_vma_unmap(struct nouveau_vma *vma)
|
||||
{
|
||||
if (vma->mem) {
|
||||
nvkm_vm_unmap(&vma->_vma);
|
||||
vma->mem = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
int
|
||||
nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem)
|
||||
{
|
||||
int ret = nouveau_mem_map(mem, vma->vmm->vm, &vma->_vma);
|
||||
if (ret)
|
||||
return ret;
|
||||
vma->mem = mem;
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nouveau_vma *
|
||||
nouveau_vma_find(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm)
|
||||
{
|
||||
struct nouveau_vma *vma;
|
||||
|
||||
list_for_each_entry(vma, &nvbo->vma_list, head) {
|
||||
if (vma->vmm == vmm)
|
||||
return vma;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void
|
||||
nouveau_vma_del(struct nouveau_vma **pvma)
|
||||
{
|
||||
struct nouveau_vma *vma = *pvma;
|
||||
if (vma && --vma->refs <= 0) {
|
||||
if (likely(vma->addr != ~0ULL)) {
|
||||
nouveau_vma_unmap(vma);
|
||||
nvkm_vm_put(&vma->_vma);
|
||||
}
|
||||
list_del(&vma->head);
|
||||
*pvma = NULL;
|
||||
kfree(*pvma);
|
||||
}
|
||||
}
|
||||
|
||||
int
|
||||
nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
|
||||
struct nouveau_vma **pvma)
|
||||
{
|
||||
struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
|
||||
struct nouveau_vma *vma;
|
||||
int ret;
|
||||
|
||||
if ((vma = *pvma = nouveau_vma_find(nvbo, vmm))) {
|
||||
vma->refs++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!(vma = *pvma = kmalloc(sizeof(*vma), GFP_KERNEL)))
|
||||
return -ENOMEM;
|
||||
vma->vmm = vmm;
|
||||
vma->refs = 1;
|
||||
vma->addr = ~0ULL;
|
||||
vma->mem = NULL;
|
||||
list_add_tail(&vma->head, &nvbo->vma_list);
|
||||
|
||||
if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
|
||||
mem->mem.page == nvbo->page) {
|
||||
ret = nvkm_vm_get(vmm->vm, mem->_mem->size << 12, mem->mem.page,
|
||||
NV_MEM_ACCESS_RW, &vma->_vma);
|
||||
if (ret)
|
||||
goto done;
|
||||
|
||||
vma->addr = vma->_vma.offset;
|
||||
ret = nouveau_vma_map(vma, mem);
|
||||
} else {
|
||||
ret = nvkm_vm_get(vmm->vm, mem->_mem->size << 12, mem->mem.page,
|
||||
NV_MEM_ACCESS_RW, &vma->_vma);
|
||||
vma->addr = vma->_vma.offset;
|
||||
}
|
||||
|
||||
done:
|
||||
if (ret)
|
||||
nouveau_vma_del(pvma);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Release the client's address space; nvkm_vm_ref(NULL, ...) drops the
 * reference taken by nouveau_vmm_init() and clears vmm->vm.
 */
void
nouveau_vmm_fini(struct nouveau_vmm *vmm)
{
	nvkm_vm_ref(NULL, &vmm->vm, NULL);
}
|
||||
|
||||
/* Create a per-client GPU address space: 1<<40 bytes starting at 0,
 * 0x1000-byte pages.  Returns 0 or a negative errno from nvkm_vm_new().
 *
 * NOTE(review): the @oclass parameter is accepted but not used in this
 * implementation — presumably reserved for selecting an MMU class in a
 * follow-up change; confirm against later revisions.
 */
int
nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
{
	vmm->cli = cli;
	return nvkm_vm_new(nvxx_device(&cli->device), 0, (1ULL << 40),
			   0x1000, NULL, &vmm->vm);
}
|
32
drivers/gpu/drm/nouveau/nouveau_vmm.h
Normal file
32
drivers/gpu/drm/nouveau/nouveau_vmm.h
Normal file
@ -0,0 +1,32 @@
|
||||
#ifndef __NOUVEAU_VMA_H__
#define __NOUVEAU_VMA_H__
#include <subdev/mmu.h>
struct nouveau_bo;
struct nouveau_mem;

/* Tracks one mapping of a BO into one client address space.  Decouples
 * the constant virtual address handed to userspace (addr) from the
 * underlying nvkm allocation (_vma), which may be re-mapped as the BO
 * moves between memory types.
 */
struct nouveau_vma {
	struct nouveau_vmm *vmm;	/* owning address space */
	int refs;			/* shared per (bo, vmm) pair */
	struct list_head head;		/* entry in nouveau_bo.vma_list */
	u64 addr;			/* VA reported to users; ~0ULL = unallocated */

	struct nouveau_mem *mem;	/* non-NULL while mapped */

	struct nvkm_vma _vma;		/* backing nvkm allocation */
};

struct nouveau_vma *nouveau_vma_find(struct nouveau_bo *, struct nouveau_vmm *);
int nouveau_vma_new(struct nouveau_bo *, struct nouveau_vmm *,
		    struct nouveau_vma **);
void nouveau_vma_del(struct nouveau_vma **);
int nouveau_vma_map(struct nouveau_vma *, struct nouveau_mem *);
void nouveau_vma_unmap(struct nouveau_vma *);

/* Per-client GPU virtual address space. */
struct nouveau_vmm {
	struct nouveau_cli *cli;	/* owning client */
	struct nvkm_vm *vm;		/* nvkm address space */
};

int nouveau_vmm_init(struct nouveau_cli *, s32 oclass, struct nouveau_vmm *);
void nouveau_vmm_fini(struct nouveau_vmm *);
#endif
|
@ -25,6 +25,7 @@
|
||||
#include "nouveau_drv.h"
|
||||
#include "nouveau_dma.h"
|
||||
#include "nouveau_fbcon.h"
|
||||
#include "nouveau_vmm.h"
|
||||
|
||||
int
|
||||
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
|
||||
@ -239,8 +240,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
|
||||
OUT_RING(chan, info->fix.line_length);
|
||||
OUT_RING(chan, info->var.xres_virtual);
|
||||
OUT_RING(chan, info->var.yres_virtual);
|
||||
OUT_RING(chan, upper_32_bits(fb->vma.offset));
|
||||
OUT_RING(chan, lower_32_bits(fb->vma.offset));
|
||||
OUT_RING(chan, upper_32_bits(fb->vma->addr));
|
||||
OUT_RING(chan, lower_32_bits(fb->vma->addr));
|
||||
BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
|
||||
OUT_RING(chan, format);
|
||||
OUT_RING(chan, 1);
|
||||
@ -248,8 +249,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
|
||||
OUT_RING(chan, info->fix.line_length);
|
||||
OUT_RING(chan, info->var.xres_virtual);
|
||||
OUT_RING(chan, info->var.yres_virtual);
|
||||
OUT_RING(chan, upper_32_bits(fb->vma.offset));
|
||||
OUT_RING(chan, lower_32_bits(fb->vma.offset));
|
||||
OUT_RING(chan, upper_32_bits(fb->vma->addr));
|
||||
OUT_RING(chan, lower_32_bits(fb->vma->addr));
|
||||
FIRE_RING(chan);
|
||||
|
||||
return 0;
|
||||
|
@ -25,6 +25,7 @@
|
||||
#include "nouveau_drv.h"
|
||||
#include "nouveau_dma.h"
|
||||
#include "nouveau_fence.h"
|
||||
#include "nouveau_vmm.h"
|
||||
|
||||
#include "nv50_display.h"
|
||||
|
||||
@ -68,7 +69,7 @@ nv84_fence_emit(struct nouveau_fence *fence)
|
||||
{
|
||||
struct nouveau_channel *chan = fence->channel;
|
||||
struct nv84_fence_chan *fctx = chan->fence;
|
||||
u64 addr = fctx->vma.offset + chan->chid * 16;
|
||||
u64 addr = fctx->vma->addr + chan->chid * 16;
|
||||
|
||||
return fctx->base.emit32(chan, addr, fence->base.seqno);
|
||||
}
|
||||
@ -78,7 +79,7 @@ nv84_fence_sync(struct nouveau_fence *fence,
|
||||
struct nouveau_channel *prev, struct nouveau_channel *chan)
|
||||
{
|
||||
struct nv84_fence_chan *fctx = chan->fence;
|
||||
u64 addr = fctx->vma.offset + prev->chid * 16;
|
||||
u64 addr = fctx->vma->addr + prev->chid * 16;
|
||||
|
||||
return fctx->base.sync32(chan, addr, fence->base.seqno);
|
||||
}
|
||||
@ -98,7 +99,7 @@ nv84_fence_context_del(struct nouveau_channel *chan)
|
||||
|
||||
nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
|
||||
mutex_lock(&priv->mutex);
|
||||
nouveau_bo_vma_del(priv->bo, &fctx->vma);
|
||||
nouveau_vma_del(&fctx->vma);
|
||||
mutex_unlock(&priv->mutex);
|
||||
nouveau_fence_context_del(&fctx->base);
|
||||
chan->fence = NULL;
|
||||
@ -126,7 +127,7 @@ nv84_fence_context_new(struct nouveau_channel *chan)
|
||||
fctx->base.sequence = nv84_fence_read(chan);
|
||||
|
||||
mutex_lock(&priv->mutex);
|
||||
ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
|
||||
ret = nouveau_vma_new(priv->bo, &cli->vmm, &fctx->vma);
|
||||
mutex_unlock(&priv->mutex);
|
||||
|
||||
if (ret)
|
||||
|
@ -25,6 +25,7 @@
|
||||
#include "nouveau_drv.h"
|
||||
#include "nouveau_dma.h"
|
||||
#include "nouveau_fbcon.h"
|
||||
#include "nouveau_vmm.h"
|
||||
|
||||
int
|
||||
nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
|
||||
@ -239,8 +240,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
|
||||
OUT_RING (chan, info->fix.line_length);
|
||||
OUT_RING (chan, info->var.xres_virtual);
|
||||
OUT_RING (chan, info->var.yres_virtual);
|
||||
OUT_RING (chan, upper_32_bits(fb->vma.offset));
|
||||
OUT_RING (chan, lower_32_bits(fb->vma.offset));
|
||||
OUT_RING (chan, upper_32_bits(fb->vma->addr));
|
||||
OUT_RING (chan, lower_32_bits(fb->vma->addr));
|
||||
BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
|
||||
OUT_RING (chan, format);
|
||||
OUT_RING (chan, 1);
|
||||
@ -250,8 +251,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
|
||||
OUT_RING (chan, info->fix.line_length);
|
||||
OUT_RING (chan, info->var.xres_virtual);
|
||||
OUT_RING (chan, info->var.yres_virtual);
|
||||
OUT_RING (chan, upper_32_bits(fb->vma.offset));
|
||||
OUT_RING (chan, lower_32_bits(fb->vma.offset));
|
||||
OUT_RING (chan, upper_32_bits(fb->vma->addr));
|
||||
OUT_RING (chan, lower_32_bits(fb->vma->addr));
|
||||
FIRE_RING (chan);
|
||||
|
||||
return 0;
|
||||
|
Loading…
Reference in New Issue
Block a user