Merge remote branch 'nouveau/drm-nouveau-next' of /ssd/git/drm-nouveau-next into drm-core-next

* 'nouveau/drm-nouveau-next' of /ssd/git/drm-nouveau-next:
  drm/nvc0: accelerate ttm buffer moves
  drm/nvc0: initial support for tiled buffer objects
  drm/nvc0: implement fbcon acceleration
  drm/nvc0: implement pgraph engine hooks
  drm/nvc0: implement pfifo engine hooks
  drm/nvc0: implement fencing
  drm/nvc0: fix channel dma init paths
  drm/nvc0: skip dma object creation for drm channel
  drm/nvc0: implement channel structure initialisation
  drm/nvc0: gpuobj_new need only check validity and init the relevant engine
  drm/nvc0: reject the notifier_alloc ioctl
  drm/nvc0: create shared channel vm
  drm/nvc0: initial vm implementation, use for bar1/bar3 management
  drm/nvc0: import initial vm backend
  drm/nouveau: modify vm to accomodate dual page tables for nvc0
  drm/nv50: add missing license header to nv50_fbcon.c
  drm/nv50: fix smatch warning in nv50_vram.c
  drm/nouveau: sizeof() vs ARRAY_SIZE()
commit 1d99e5c572
@@ -18,17 +18,19 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
              nv04_graph.o nv10_graph.o nv20_graph.o \
              nv40_graph.o nv50_graph.o nvc0_graph.o \
-             nv40_grctx.o nv50_grctx.o \
+             nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
              nv84_crypt.o \
              nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
              nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
-             nv50_cursor.o nv50_display.o nv50_fbcon.o \
+             nv50_cursor.o nv50_display.o \
              nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
-             nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
+             nv04_crtc.o nv04_display.o nv04_cursor.o \
+             nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
              nv10_gpio.o nv50_gpio.o \
              nv50_calc.o \
              nv04_pm.o nv50_pm.o nva3_pm.o \
-             nv50_vram.o nv50_vm.o
+             nv50_vram.o nvc0_vram.o \
+             nv50_vm.o nvc0_vm.o

 nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
@@ -413,7 +413,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
-		if (dev_priv->card_type == NV_50) {
+		if (dev_priv->card_type >= NV_50) {
 			man->func = &nouveau_vram_manager;
 			man->io_reserve_fastpath = false;
 			man->use_io_reserve_lru = true;
@@ -514,6 +514,58 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
 	return chan->vram_handle;
 }

+static int
+nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	u64 src_offset = old_mem->start << PAGE_SHIFT;
+	u64 dst_offset = new_mem->start << PAGE_SHIFT;
+	u32 page_count = new_mem->num_pages;
+	int ret;
+
+	if (!nvbo->no_vm) {
+		if (old_mem->mem_type == TTM_PL_VRAM)
+			src_offset = nvbo->vma.offset;
+		else
+			src_offset += dev_priv->gart_info.aper_base;
+
+		if (new_mem->mem_type == TTM_PL_VRAM)
+			dst_offset = nvbo->vma.offset;
+		else
+			dst_offset += dev_priv->gart_info.aper_base;
+	}
+
+	page_count = new_mem->num_pages;
+	while (page_count) {
+		int line_count = (page_count > 2047) ? 2047 : page_count;
+
+		ret = RING_SPACE(chan, 12);
+		if (ret)
+			return ret;
+
+		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
+		OUT_RING (chan, upper_32_bits(dst_offset));
+		OUT_RING (chan, lower_32_bits(dst_offset));
+		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
+		OUT_RING (chan, upper_32_bits(src_offset));
+		OUT_RING (chan, lower_32_bits(src_offset));
+		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
+		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
+		OUT_RING (chan, PAGE_SIZE); /* line_length */
+		OUT_RING (chan, line_count);
+		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
+		OUT_RING (chan, 0x00100110);
+
+		page_count -= line_count;
+		src_offset += (PAGE_SIZE * line_count);
+		dst_offset += (PAGE_SIZE * line_count);
+	}
+
+	return 0;
+}
+
 static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
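A note on the copy loop in nvc0_bo_move_m2mf() above: both offsets advance one PAGE_SIZE-sized line per page, and each submission is capped at 2047 lines, presumably the largest line count the M2MF methods accept. A minimal sketch of the chunking arithmetic (the helper name is hypothetical, not part of the patch):

	/* Split an N-page copy into bursts of at most 2047 one-page lines,
	 * matching the loop above. */
	static inline u32 m2mf_burst(u32 pages_left)
	{
		return pages_left > 2047 ? 2047 : pages_left;
	}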
@@ -690,7 +742,10 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	if (dev_priv->card_type < NV_50)
 		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
 	else
+	if (dev_priv->card_type < NV_C0)
 		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+	else
+		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
 	if (ret == 0) {
 		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
 						    no_wait_reserve,
@@ -901,6 +956,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 	case TTM_PL_VRAM:
 	{
 		struct nouveau_vram *vram = mem->mm_node;
+		u8 page_shift;

 		if (!dev_priv->bar1_vm) {
 			mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -909,8 +965,14 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 			break;
 		}

-		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size, 12,
-				     NV_MEM_ACCESS_RW, &vram->bar_vma);
+		if (dev_priv->card_type == NV_C0)
+			page_shift = vram->page_shift;
+		else
+			page_shift = 12;
+
+		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
+				     page_shift, NV_MEM_ACCESS_RW,
+				     &vram->bar_vma);
 		if (ret)
 			return ret;

@@ -920,8 +982,9 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 			return ret;
 		}

 		mem->bus.offset = vram->bar_vma.offset;
-		mem->bus.offset -= 0x0020000000ULL;
+		if (dev_priv->card_type == NV_50) /*XXX*/
+			mem->bus.offset -= 0x0020000000ULL;
 		mem->bus.base = pci_resource_start(dev->pdev, 1);
 		mem->bus.is_iomem = true;
 	}
@@ -38,9 +38,14 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 	int ret;

 	if (dev_priv->card_type >= NV_50) {
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
-					     (1ULL << 40), NV_MEM_ACCESS_RO,
-					     NV_MEM_TARGET_VM, &pushbuf);
+		if (dev_priv->card_type < NV_C0) {
+			ret = nouveau_gpuobj_dma_new(chan,
+						     NV_CLASS_DMA_IN_MEMORY, 0,
+						     (1ULL << 40),
+						     NV_MEM_ACCESS_RO,
+						     NV_MEM_TARGET_VM,
+						     &pushbuf);
+		}
 		chan->pushbuf_base = pb->bo.offset;
 	} else
 	if (pb->bo.mem.mem_type == TTM_PL_TT) {
@@ -71,7 +76,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)

 	nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
 	nouveau_gpuobj_ref(NULL, &pushbuf);
-	return 0;
+	return ret;
 }

 static struct nouveau_bo *
@@ -99,6 +104,13 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
 		return NULL;
 	}

+	ret = nouveau_bo_map(pushbuf);
+	if (ret) {
+		nouveau_bo_unpin(pushbuf);
+		nouveau_bo_ref(NULL, &pushbuf);
+		return NULL;
+	}
+
 	return pushbuf;
 }

@@ -36,7 +36,7 @@ nouveau_dma_pre_init(struct nouveau_channel *chan)
 	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct nouveau_bo *pushbuf = chan->pushbuf_bo;

-	if (dev_priv->card_type == NV_50) {
+	if (dev_priv->card_type >= NV_50) {
 		const int ib_size = pushbuf->bo.mem.size / 2;

 		chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
@@ -61,6 +61,21 @@ nouveau_dma_init(struct nouveau_channel *chan)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	int ret, i;

+	if (dev_priv->card_type >= NV_C0) {
+		ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
+		if (ret)
+			return ret;
+
+		ret = RING_SPACE(chan, 2);
+		if (ret)
+			return ret;
+
+		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
+		OUT_RING (chan, 0x00009039);
+		FIRE_RING (chan);
+		return 0;
+	}
+
 	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
 	ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ?
 				    0x0039 : 0x5039);
@@ -72,11 +87,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
 	if (ret)
 		return ret;

-	/* Map push buffer */
-	ret = nouveau_bo_map(chan->pushbuf_bo);
-	if (ret)
-		return ret;
-
 	/* Insert NOPS for NOUVEAU_DMA_SKIPS */
 	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
 	if (ret)
@@ -77,7 +77,8 @@ enum {
 	/* G80+ display objects */
 	NvEvoVRAM	= 0x01000000,
 	NvEvoFB16	= 0x01000001,
-	NvEvoFB32	= 0x01000002
+	NvEvoFB32	= 0x01000002,
+	NvEvoVRAM_LP	= 0x01000003
 };

 #define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
@@ -124,6 +125,12 @@ OUT_RING(struct nouveau_channel *chan, int data)
 extern void
 OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);

+static inline void
+BEGIN_NVC0(struct nouveau_channel *chan, int op, int subc, int mthd, int size)
+{
+	OUT_RING(chan, (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2));
+}
+
 static inline void
 BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
 {
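The BEGIN_NVC0() helper above packs the entire Fermi command header into a single dword. A worked example of the encoding (the subchannel value 4 is arbitrary, and reading op 2 as an incrementing-method run and op 6 as a non-incrementing data stream is an inference from how this series uses the macro, not something the patch states):

	/* BEGIN_NVC0(chan, 2, 4, 0x0104, 2) writes:
	 *   (2 << 28) | (2 << 16) | (4 << 13) | (0x0104 >> 2) == 0x20028041
	 * i.e. 4-bit op, 13-bit method count, 3-bit subchannel, method/4. */
	u32 hdr = (2 << 28) | (2 << 16) | (4 << 13) | (0x0104 >> 2);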
@@ -69,6 +69,7 @@ struct nouveau_vram {
 	struct drm_device *dev;

 	struct nouveau_vma bar_vma;
+	u8 page_shift;

 	struct list_head regions;
 	u32 memtype;
@@ -239,6 +240,7 @@ struct nouveau_channel {
 	/* PFIFO context */
 	struct nouveau_gpuobj *ramfc;
 	struct nouveau_gpuobj *cache;
+	void *fifo_priv;

 	/* PGRAPH context */
 	/* XXX may be merge 2 pointers as private data ??? */
@@ -336,6 +338,7 @@ struct nouveau_fb_engine {
 };

 struct nouveau_fifo_engine {
+	void *priv;
 	int channels;

 	struct nouveau_gpuobj *playlist[2];
@@ -362,6 +365,7 @@ struct nouveau_pgraph_engine {
 	bool accel_blocked;
 	bool registered;
 	int grctx_size;
+	void *priv;

 	/* NV2x/NV3x context table (0x400780) */
 	struct nouveau_gpuobj *ctx_table;
@@ -841,6 +845,9 @@ extern void nv10_mem_put_tile_region(struct drm_device *dev,
 				     struct nouveau_fence *fence);
 extern const struct ttm_mem_type_manager_func nouveau_vram_manager;

+/* nvc0_vram.c */
+extern const struct ttm_mem_type_manager_func nvc0_vram_manager;
+
 /* nouveau_notifier.c */
 extern int nouveau_notifier_init_channel(struct nouveau_channel *);
 extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
@@ -1228,11 +1235,6 @@ extern int nvc0_instmem_init(struct drm_device *);
 extern void nvc0_instmem_takedown(struct drm_device *);
 extern int nvc0_instmem_suspend(struct drm_device *);
 extern void nvc0_instmem_resume(struct drm_device *);
-extern int nvc0_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
-extern void nvc0_instmem_put(struct nouveau_gpuobj *);
-extern int nvc0_instmem_map(struct nouveau_gpuobj *);
-extern void nvc0_instmem_unmap(struct nouveau_gpuobj *);
-extern void nvc0_instmem_flush(struct drm_device *);

 /* nv04_mc.c */
 extern int nv04_mc_init(struct drm_device *);
@@ -68,6 +68,8 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 		else
+		if (dev_priv->card_type < NV_C0)
 			ret = nv50_fbcon_fillrect(info, rect);
+		else
+			ret = nvc0_fbcon_fillrect(info, rect);
 		mutex_unlock(&dev_priv->channel->mutex);
 	}

@@ -98,6 +100,8 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
 		else
+		if (dev_priv->card_type < NV_C0)
 			ret = nv50_fbcon_copyarea(info, image);
+		else
+			ret = nvc0_fbcon_copyarea(info, image);
 		mutex_unlock(&dev_priv->channel->mutex);
 	}

@@ -128,6 +132,8 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 		else
+		if (dev_priv->card_type < NV_C0)
 			ret = nv50_fbcon_imageblit(info, image);
+		else
+			ret = nvc0_fbcon_imageblit(info, image);
 		mutex_unlock(&dev_priv->channel->mutex);
 	}

@@ -163,10 +169,18 @@ nouveau_fbcon_sync(struct fb_info *info)
 		return 0;
 	}

-	BEGIN_RING(chan, 0, 0x0104, 1);
-	OUT_RING(chan, 0);
-	BEGIN_RING(chan, 0, 0x0100, 1);
-	OUT_RING(chan, 0);
+	if (dev_priv->card_type >= NV_C0) {
+		BEGIN_NVC0(chan, 2, NvSub2D, 0x010c, 1);
+		OUT_RING (chan, 0);
+		BEGIN_NVC0(chan, 2, NvSub2D, 0x0100, 1);
+		OUT_RING (chan, 0);
+	} else {
+		BEGIN_RING(chan, 0, 0x0104, 1);
+		OUT_RING (chan, 0);
+		BEGIN_RING(chan, 0, 0x0100, 1);
+		OUT_RING (chan, 0);
+	}
+
 	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
 	FIRE_RING(chan);
 	mutex_unlock(&chan->mutex);
@@ -370,6 +384,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 	else
+	if (dev_priv->card_type < NV_C0)
 		ret = nv50_fbcon_accel_init(info);
+	else
+		ret = nvc0_fbcon_accel_init(info);

 	if (ret == 0)
 		info->fbops = &nouveau_fbcon_ops;
@@ -44,11 +44,17 @@ int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
 int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
 int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv04_fbcon_accel_init(struct fb_info *info);

 int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
 int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
 int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv50_fbcon_accel_init(struct fb_info *info);
+
+int nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nvc0_fbcon_accel_init(struct fb_info *info);

 void nouveau_fbcon_gpu_lockup(struct fb_info *info);

 int nouveau_fbcon_init(struct drm_device *dev);
@@ -32,7 +32,8 @@
 #include "nouveau_dma.h"

 #define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
-#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
+#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17 && \
+		       nouveau_private(dev)->card_type < NV_C0)

 struct nouveau_fence {
 	struct nouveau_channel *channel;
@@ -139,6 +140,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
 {
 	struct nouveau_channel *chan = fence->channel;
 	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	int ret;

 	ret = RING_SPACE(chan, 2);
@@ -159,8 +161,15 @@ nouveau_fence_emit(struct nouveau_fence *fence)
 	list_add_tail(&fence->entry, &chan->fence.pending);
 	spin_unlock(&chan->fence.lock);

-	BEGIN_RING(chan, NvSubSw, USE_REFCNT(dev) ? 0x0050 : 0x0150, 1);
-	OUT_RING(chan, fence->sequence);
+	if (USE_REFCNT(dev)) {
+		if (dev_priv->card_type < NV_C0)
+			BEGIN_RING(chan, NvSubSw, 0x0050, 1);
+		else
+			BEGIN_NVC0(chan, 2, NvSubSw, 0x0050, 1);
+	} else {
+		BEGIN_RING(chan, NvSubSw, 0x0150, 1);
+	}
+	OUT_RING (chan, fence->sequence);
 	FIRE_RING(chan);

 	return 0;
@@ -445,11 +454,14 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
 	if (ret)
 		return ret;

-	ret = RING_SPACE(chan, 2);
-	if (ret)
-		return ret;
-	BEGIN_RING(chan, NvSubSw, 0, 1);
-	OUT_RING(chan, NvSw);
+	/* we leave subchannel empty for nvc0 */
+	if (dev_priv->card_type < NV_C0) {
+		ret = RING_SPACE(chan, 2);
+		if (ret)
+			return ret;
+		BEGIN_RING(chan, NvSubSw, 0, 1);
+		OUT_RING(chan, NvSw);
+	}

 	/* Create a DMA object for the shared cross-channel sync area. */
 	if (USE_SEMA(dev)) {
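On Fermi the fence value still lands on method 0x0050, but through the new-style header, and no software object is bound on the subchannel beforehand. A sketch of the two headers the emit path can now produce; NvSubSw is left symbolic, and the pre-Fermi layout is quoted from memory of BEGIN_RING() and should be treated as an assumption:

	u32 nv04_hdr = (1 << 18) | (NvSubSw << 13) | 0x0050;    /* count, subc, method */
	u32 nvc0_hdr = (2 << 28) | (1 << 16) | (NvSubSw << 13) | (0x0050 >> 2);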
@@ -255,9 +255,6 @@ nouveau_mem_detect(struct drm_device *dev)
 	if (dev_priv->card_type < NV_50) {
 		dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
 		dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
-	} else {
-		dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
-		dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
 	}

 	if (dev_priv->vram_size)
@@ -731,6 +728,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 	if (ret)
 		return ret;

+	node->page_shift = 12;
+	if (nvbo->vma.node)
+		node->page_shift = nvbo->vma.node->type;
+
 	mem->mm_node = node;
 	mem->start = node->offset >> PAGE_SHIFT;
 	return 0;
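The vram node now records the page size the buffer's VMA was created with, which the nouveau_ttm_io_mem_reserve() hunk earlier uses when mapping the buffer through BAR1 on Fermi. Restated as one expression (a sketch, assuming the VMA node's type field is the page shift):

	/* 17 (128KiB pages) for a Fermi large-page VMA, 12 otherwise */
	node->page_shift = nvbo->vma.node ? nvbo->vma.node->type : 12;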
@@ -59,4 +59,9 @@ int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
 void nv50_vram_del(struct drm_device *, struct nouveau_vram **);
 bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);

+int nvc0_vram_init(struct drm_device *);
+int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
+		  u32 memtype, struct nouveau_vram **);
+bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
+
 #endif
@@ -164,10 +164,15 @@ int
 nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_notifierobj_alloc *na = data;
 	struct nouveau_channel *chan;
 	int ret;

+	/* completely unnecessary for these chipsets... */
+	if (unlikely(dev_priv->card_type >= NV_C0))
+		return -EINVAL;
+
 	chan = nouveau_channel_get(dev, file_priv, na->channel);
 	if (IS_ERR(chan))
 		return PTR_ERR(chan);
@@ -643,10 +643,13 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
 found:
 	switch (oc->engine) {
 	case NVOBJ_ENGINE_SW:
-		ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
-		if (ret)
-			return ret;
-		goto insert;
+		if (dev_priv->card_type < NV_C0) {
+			ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
+			if (ret)
+				return ret;
+			goto insert;
+		}
+		break;
 	case NVOBJ_ENGINE_GR:
 		if (dev_priv->card_type >= NV_50 && !chan->ramin_grctx) {
 			struct nouveau_pgraph_engine *pgraph =
@@ -669,6 +672,10 @@ found:
 		break;
 	}

+	/* we're done if this is fermi */
+	if (dev_priv->card_type >= NV_C0)
+		return 0;
+
 	ret = nouveau_gpuobj_new(dev, chan,
 				 nouveau_gpuobj_class_instmem_size(dev, class),
 				 16,
@@ -772,6 +779,25 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,

 	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

+	if (dev_priv->card_type == NV_C0) {
+		struct nouveau_vm *vm = dev_priv->chan_vm;
+		struct nouveau_vm_pgd *vpgd;
+
+		ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,
+					 &chan->ramin);
+		if (ret)
+			return ret;
+
+		nouveau_vm_ref(vm, &chan->vm, NULL);
+
+		vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
+		nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
+		nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
+		nv_wo32(chan->ramin, 0x0208, 0xffffffff);
+		nv_wo32(chan->ramin, 0x020c, 0x000000ff);
+		return 0;
+	}
+
 	/* Allocate a chunk of memory for per-channel object storage */
 	ret = nouveau_gpuobj_channel_init_pramin(chan);
 	if (ret) {
@@ -779,7 +805,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 		return ret;
 	}

-	/* NV50/NVC0 VM
+	/* NV50 VM
 	 * - Allocate per-channel page-directory
 	 * - Link with shared channel VM
 	 */
@@ -877,9 +903,6 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)

 	NV_DEBUG(dev, "ch%d\n", chan->id);

-	if (!chan->ramht)
-		return;
-
 	nouveau_ramht_ref(NULL, &chan->ramht, chan);

 	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
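The four nv_wo32() writes in the Fermi branch of nouveau_gpuobj_channel_init() embed the VM state directly in the channel's instance block: 0x0200/0x0204 take the shared page directory's instance address, and 0x0208/0x020c what appears to be the VM address limit. Reading the constants that way is an inference, not stated by the patch; for a 40-bit address space it is consistent:

	u64 pgd   = vpgd->obj->vinst;        /* page-directory instance address */
	u64 limit = (1ULL << 40) - 1;        /* 0xff_ffffffff */
	nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd));
	nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd));
	nv_wo32(chan->ramin, 0x0208, lower_32_bits(limit));  /* 0xffffffff */
	nv_wo32(chan->ramin, 0x020c, upper_32_bits(limit));  /* 0x000000ff */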
@@ -464,11 +464,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.takedown = nvc0_instmem_takedown;
 		engine->instmem.suspend = nvc0_instmem_suspend;
 		engine->instmem.resume = nvc0_instmem_resume;
-		engine->instmem.get = nvc0_instmem_get;
-		engine->instmem.put = nvc0_instmem_put;
-		engine->instmem.map = nvc0_instmem_map;
-		engine->instmem.unmap = nvc0_instmem_unmap;
-		engine->instmem.flush = nvc0_instmem_flush;
+		engine->instmem.get = nv50_instmem_get;
+		engine->instmem.put = nv50_instmem_put;
+		engine->instmem.map = nv50_instmem_map;
+		engine->instmem.unmap = nv50_instmem_unmap;
+		engine->instmem.flush = nv84_instmem_flush;
 		engine->mc.init = nv50_mc_init;
 		engine->mc.takedown = nv50_mc_takedown;
 		engine->timer.init = nv04_timer_init;
@@ -509,8 +509,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->gpio.irq_enable = nv50_gpio_irq_enable;
 		engine->crypt.init = nouveau_stub_init;
 		engine->crypt.takedown = nouveau_stub_takedown;
-		engine->vram.init = nouveau_mem_detect;
-		engine->vram.flags_valid = nouveau_mem_flags_valid;
+		engine->vram.init = nvc0_vram_init;
+		engine->vram.get = nvc0_vram_new;
+		engine->vram.put = nv50_vram_del;
+		engine->vram.flags_valid = nvc0_vram_flags_valid;
 		break;
 	default:
 		NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
@@ -550,6 +552,10 @@ nouveau_card_init_channel(struct drm_device *dev)
 	if (ret)
 		return ret;

+	/* no dma objects on fermi... */
+	if (dev_priv->card_type >= NV_C0)
+		goto out_done;
+
 	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
 				     0, dev_priv->vram_size,
 				     NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
@@ -574,6 +580,7 @@ nouveau_card_init_channel(struct drm_device *dev)
 	if (ret)
 		goto out_err;

+out_done:
 	mutex_unlock(&dev_priv->channel->mutex);
 	return 0;

@@ -32,6 +32,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
 {
 	struct nouveau_vm *vm = vma->vm;
 	struct nouveau_mm_node *r;
+	int big = vma->node->type != vm->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
@@ -44,7 +45,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
 		u32 num = r->length >> bits;

 		while (num) {
-			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;
+			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

 			end = (pte + num);
 			if (unlikely(end >= max))
@@ -76,6 +77,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
 		  dma_addr_t *list)
 {
 	struct nouveau_vm *vm = vma->vm;
+	int big = vma->node->type != vm->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num = length >> vma->node->type;
@@ -85,7 +87,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
 	u32 end, len;

 	while (num) {
-		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

 		end = (pte + num);
 		if (unlikely(end >= max))
@@ -110,6 +112,7 @@ void
 nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
 {
 	struct nouveau_vm *vm = vma->vm;
+	int big = vma->node->type != vm->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num = length >> vma->node->type;
@@ -119,7 +122,7 @@ nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
 	u32 end, len;

 	while (num) {
-		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

 		end = (pte + num);
 		if (unlikely(end >= max))
@@ -146,7 +149,7 @@ nouveau_vm_unmap(struct nouveau_vma *vma)
 }

 static void
-nouveau_vm_unmap_pgt(struct nouveau_vm *vm, u32 fpde, u32 lpde)
+nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
 {
 	struct nouveau_vm_pgd *vpgd;
 	struct nouveau_vm_pgt *vpgt;
@@ -155,15 +158,15 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, u32 fpde, u32 lpde)

 	for (pde = fpde; pde <= lpde; pde++) {
 		vpgt = &vm->pgt[pde - vm->fpde];
-		if (--vpgt->refcount)
+		if (--vpgt->refcount[big])
 			continue;

-		list_for_each_entry(vpgd, &vm->pgd_list, head) {
-			vm->unmap_pgt(vpgd->obj, pde);
-		}
+		pgt = vpgt->obj[big];
+		vpgt->obj[big] = NULL;

-		pgt = vpgt->obj;
-		vpgt->obj = NULL;
+		list_for_each_entry(vpgd, &vm->pgd_list, head) {
+			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
+		}

 		mutex_unlock(&vm->mm->mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
@@ -177,6 +180,7 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
 	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
 	struct nouveau_vm_pgd *vpgd;
 	struct nouveau_gpuobj *pgt;
+	int big = (type != vm->spg_shift);
 	u32 pgt_size;
 	int ret;

@@ -191,19 +195,18 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
 		return ret;

 	/* someone beat us to filling the PDE while we didn't have the lock */
-	if (unlikely(vpgt->refcount++)) {
+	if (unlikely(vpgt->refcount[big]++)) {
 		mutex_unlock(&vm->mm->mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
 		mutex_lock(&vm->mm->mutex);
 		return 0;
 	}

+	vpgt->obj[big] = pgt;
 	list_for_each_entry(vpgd, &vm->pgd_list, head) {
-		vm->map_pgt(vpgd->obj, type, pde, pgt);
+		vm->map_pgt(vpgd->obj, pde, vpgt->obj);
 	}

-	vpgt->page_shift = type;
-	vpgt->obj = pgt;
 	return 0;
 }

@@ -227,16 +230,17 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
 	for (pde = fpde; pde <= lpde; pde++) {
 		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+		int big = (vma->node->type != vm->spg_shift);

-		if (likely(vpgt->refcount)) {
-			vpgt->refcount++;
+		if (likely(vpgt->refcount[big])) {
+			vpgt->refcount[big]++;
 			continue;
 		}

 		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
 		if (ret) {
 			if (pde != fpde)
-				nouveau_vm_unmap_pgt(vm, fpde, pde - 1);
+				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
 			nouveau_mm_put(vm->mm, vma->node);
 			mutex_unlock(&vm->mm->mutex);
 			vma->node = NULL;
@@ -263,21 +267,20 @@ nouveau_vm_put(struct nouveau_vma *vma)
 	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;

 	mutex_lock(&vm->mm->mutex);
-	nouveau_vm_unmap_pgt(vm, fpde, lpde);
+	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
 	nouveau_mm_put(vm->mm, vma->node);
 	vma->node = NULL;
 	mutex_unlock(&vm->mm->mutex);
 }

 int
 nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
-	       u8 pgt_bits, u8 spg_shift, u8 lpg_shift,
 	       struct nouveau_vm **pvm)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_vm *vm;
 	u64 mm_length = (offset + length) - mm_offset;
-	u32 block;
+	u32 block, pgt_bits;
 	int ret;

 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
@@ -286,11 +289,40 @@ nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,

 	if (dev_priv->card_type == NV_50) {
 		vm->map_pgt = nv50_vm_map_pgt;
-		vm->unmap_pgt = nv50_vm_unmap_pgt;
 		vm->map = nv50_vm_map;
 		vm->map_sg = nv50_vm_map_sg;
 		vm->unmap = nv50_vm_unmap;
 		vm->flush = nv50_vm_flush;
+		vm->spg_shift = 12;
+		vm->lpg_shift = 16;
+
+		pgt_bits = 29;
+		block = (1 << pgt_bits);
+		if (length < block)
+			block = length;
+
+	} else
+	if (dev_priv->card_type == NV_C0) {
+		vm->map_pgt = nvc0_vm_map_pgt;
+		vm->map = nvc0_vm_map;
+		vm->map_sg = nvc0_vm_map_sg;
+		vm->unmap = nvc0_vm_unmap;
+		vm->flush = nvc0_vm_flush;
+		vm->spg_shift = 12;
+		vm->lpg_shift = 17;
+		pgt_bits = 27;
+
+		/* Should be 4096 everywhere, this is a hack that's
+		 * currently necessary to avoid an elusive bug that
+		 * causes corruption when mixing small/large pages
+		 */
+		if (length < (1ULL << 40))
+			block = 4096;
+		else {
+			block = (1 << pgt_bits);
+			if (length < block)
+				block = length;
+		}
 	} else {
 		kfree(vm);
 		return -ENOSYS;
@@ -308,12 +340,6 @@ nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
 	vm->dev = dev;
 	vm->refcount = 1;
 	vm->pgt_bits = pgt_bits - 12;
-	vm->spg_shift = spg_shift;
-	vm->lpg_shift = lpg_shift;
-
-	block = (1 << pgt_bits);
-	if (length < block)
-		block = length;

 	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
 			      block >> 12);
@@ -342,16 +368,8 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
 	nouveau_gpuobj_ref(pgd, &vpgd->obj);

 	mutex_lock(&vm->mm->mutex);
-	for (i = vm->fpde; i <= vm->lpde; i++) {
-		struct nouveau_vm_pgt *vpgt = &vm->pgt[i - vm->fpde];
-
-		if (!vpgt->obj) {
-			vm->unmap_pgt(pgd, i);
-			continue;
-		}
-
-		vm->map_pgt(pgd, vpgt->page_shift, i, vpgt->obj);
-	}
+	for (i = vm->fpde; i <= vm->lpde; i++)
+		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
 	list_add(&vpgd->head, &vm->pgd_list);
 	mutex_unlock(&vm->mm->mutex);
 	return 0;
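The theme of the nouveau_vm.c changes above is that a PDE can now reference two page tables at once, one per page size, so every per-PDE field becomes a pair. A condensed sketch of the selection logic (types repeated from nouveau_vm.h below):

	struct nouveau_vm_pgt {
		struct nouveau_gpuobj *obj[2];   /* [0] small pages, [1] large */
		u32 refcount[2];
	};

	int big = (vma->node->type != vm->spg_shift);   /* picks the table */
	struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];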
@@ -31,9 +31,8 @@
 #include "nouveau_mm.h"

 struct nouveau_vm_pgt {
-	struct nouveau_gpuobj *obj;
-	u32 page_shift;
-	u32 refcount;
+	struct nouveau_gpuobj *obj[2];
+	u32 refcount[2];
 };

 struct nouveau_vm_pgd {
@@ -65,9 +64,8 @@ struct nouveau_vm {
 	u8 spg_shift;
 	u8 lpg_shift;

-	void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 type, u32 pde,
-			struct nouveau_gpuobj *pgt);
-	void (*unmap_pgt)(struct nouveau_gpuobj *pgd, u32 pde);
+	void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
+			struct nouveau_gpuobj *pgt[2]);
 	void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
 		    struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
 	void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
@@ -78,7 +76,6 @@ struct nouveau_vm {

 /* nouveau_vm.c */
 int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
-		   u8 pgt_bits, u8 spg_shift, u8 lpg_shift,
 		   struct nouveau_vm **);
 int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
 		   struct nouveau_gpuobj *pgd);
@@ -93,9 +90,8 @@ void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
 		       dma_addr_t *);

 /* nv50_vm.c */
-void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde,
-		     struct nouveau_gpuobj *pgt);
-void nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde);
+void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+		     struct nouveau_gpuobj *pgt[2]);
 void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
 		 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
 void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
@@ -104,4 +100,14 @@ void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
 void nv50_vm_flush(struct nouveau_vm *);
 void nv50_vm_flush_engine(struct drm_device *, int engine);

+/* nvc0_vm.c */
+void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+		     struct nouveau_gpuobj *pgt[2]);
+void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
+		 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
+		    u32 pte, dma_addr_t *, u32 cnt);
+void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
+void nvc0_vm_flush(struct nouveau_vm *);
+
 #endif
@@ -115,15 +115,16 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
 		OUT_RING(evo, 0);
 		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
 		if (dev_priv->chipset != 0x50)
-			if (nv_crtc->fb.tile_flags == 0x7a00)
+			if (nv_crtc->fb.tile_flags == 0x7a00 ||
+			    nv_crtc->fb.tile_flags == 0xfe00)
 				OUT_RING(evo, NvEvoFB32);
 			else
 			if (nv_crtc->fb.tile_flags == 0x7000)
 				OUT_RING(evo, NvEvoFB16);
 			else
-				OUT_RING(evo, NvEvoVRAM);
+				OUT_RING(evo, NvEvoVRAM_LP);
 		else
-			OUT_RING(evo, NvEvoVRAM);
+			OUT_RING(evo, NvEvoVRAM_LP);
 	}

 	nv_crtc->fb.blanked = blanked;
@@ -555,13 +556,14 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
 		return ret;

 	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
-	if (nv_crtc->fb.tile_flags == 0x7a00)
+	if (nv_crtc->fb.tile_flags == 0x7a00 ||
+	    nv_crtc->fb.tile_flags == 0xfe00)
 		OUT_RING(evo, NvEvoFB32);
 	else
 	if (nv_crtc->fb.tile_flags == 0x7000)
 		OUT_RING(evo, NvEvoFB16);
 	else
-		OUT_RING(evo, NvEvoVRAM);
+		OUT_RING(evo, NvEvoVRAM_LP);
 	}

 	ret = RING_SPACE(evo, 12);
@@ -575,8 +577,10 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
 	if (!nv_crtc->fb.tile_flags) {
 		OUT_RING(evo, drm_fb->pitch | (1 << 20));
 	} else {
-		OUT_RING(evo, ((drm_fb->pitch / 4) << 4) |
-				  fb->nvbo->tile_mode);
+		u32 tile_mode = fb->nvbo->tile_mode;
+		if (dev_priv->card_type >= NV_C0)
+			tile_mode >>= 4;
+		OUT_RING(evo, ((drm_fb->pitch / 4) << 4) | tile_mode);
 	}
 	if (dev_priv->chipset == 0x50)
 		OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format);
@@ -53,7 +53,8 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)

 int
 nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
-		    u32 tile_flags, u32 magic_flags, u32 offset, u32 limit)
+		    u32 tile_flags, u32 magic_flags, u32 offset, u32 limit,
+		    u32 flags5)
 {
 	struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
 	struct drm_device *dev = evo->dev;
@@ -70,10 +71,7 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
 	nv_wo32(obj, 8, offset);
 	nv_wo32(obj, 12, 0x00000000);
 	nv_wo32(obj, 16, 0x00000000);
-	if (dev_priv->card_type < NV_C0)
-		nv_wo32(obj, 20, 0x00010000);
-	else
-		nv_wo32(obj, 20, 0x00020000);
+	nv_wo32(obj, 20, flags5);
 	dev_priv->engine.instmem.flush(dev);

 	ret = nouveau_ramht_insert(evo, name, obj);
@@ -264,9 +262,31 @@ nv50_evo_create(struct drm_device *dev)
 	}

 	/* create some default objects for the scanout memtypes we support */
+	if (dev_priv->card_type >= NV_C0) {
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0xfe, 0x19,
+					  0, 0xffffffff, 0x00000000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
+					  0, dev_priv->vram_size, 0x00020000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
+					  0, dev_priv->vram_size, 0x00000000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+	} else
 	if (dev_priv->chipset != 0x50) {
 		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
-					  0, 0xffffffff);
+					  0, 0xffffffff, 0x00010000);
 		if (ret) {
 			nv50_evo_channel_del(&dev_priv->evo);
 			return ret;
@@ -274,18 +294,25 @@ nv50_evo_create(struct drm_device *dev)
 		}

 		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19,
-					  0, 0xffffffff);
+					  0, 0xffffffff, 0x00010000);
 		if (ret) {
 			nv50_evo_channel_del(&dev_priv->evo);
 			return ret;
 		}
-	}

-	ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
-				  0, dev_priv->vram_size);
-	if (ret) {
-		nv50_evo_channel_del(&dev_priv->evo);
-		return ret;
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
+					  0, dev_priv->vram_size, 0x00010000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
+					  0, dev_priv->vram_size, 0x00010000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
 	}

 	return 0;
@@ -1,3 +1,27 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
@@ -151,20 +151,19 @@ nv50_instmem_init(struct drm_device *dev)

 	/* BAR3 */
 	ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
-			     29, 12, 16, &dev_priv->bar3_vm);
+			     &dev_priv->bar3_vm);
 	if (ret)
 		goto error;

 	ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
 				 0x1000, NVOBJ_FLAG_DONT_MAP |
 				 NVOBJ_FLAG_ZERO_ALLOC,
-				 &dev_priv->bar3_vm->pgt[0].obj);
+				 &dev_priv->bar3_vm->pgt[0].obj[0]);
 	if (ret)
 		goto error;
-	dev_priv->bar3_vm->pgt[0].page_shift = 12;
-	dev_priv->bar3_vm->pgt[0].refcount = 1;
+	dev_priv->bar3_vm->pgt[0].refcount[0] = 1;

-	nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj);
+	nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);

 	ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
 	if (ret)
@@ -195,8 +194,7 @@ nv50_instmem_init(struct drm_device *dev)
 	nv_wo32(chan->ramin, 0, tmp);

 	/* BAR1 */
-	ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE,
-			     29, 12, 16, &vm);
+	ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm);
 	if (ret)
 		goto error;

@@ -220,7 +218,7 @@ nv50_instmem_init(struct drm_device *dev)
 	 * to catch "NULL pointer" references
 	 */
 	ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
-			     29, 12, 16, &dev_priv->chan_vm);
+			     &dev_priv->chan_vm);
 	if (ret)
 		return ret;

@@ -258,7 +256,7 @@ nv50_instmem_takedown(struct drm_device *dev)
 	dev_priv->channels.ptr[127] = 0;
 	nv50_channel_del(&dev_priv->channels.ptr[0]);

-	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj);
+	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
 	nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);

 	if (dev_priv->ramin_heap.free_stack.next)
@@ -28,39 +28,40 @@
 #include "nouveau_vm.h"

 void
-nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde,
-		struct nouveau_gpuobj *pgt)
+nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+		struct nouveau_gpuobj *pgt[2])
 {
 	struct drm_nouveau_private *dev_priv = pgd->dev->dev_private;
-	u32 coverage = (pgt->size >> 3) << type;
-	u64 phys;
+	u64 phys = 0xdeadcafe00000000ULL;
+	u32 coverage = 0;

-	phys = pgt->vinst;
-	phys |= 0x01; /* present */
-	phys |= (type == 12) ? 0x02 : 0x00; /* 4KiB pages */
-	if (dev_priv->vram_sys_base) {
-		phys += dev_priv->vram_sys_base;
-		phys |= 0x30;
+	if (pgt[0]) {
+		phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
+		coverage = (pgt[0]->size >> 3) << 12;
+	} else
+	if (pgt[1]) {
+		phys = 0x00000001 | pgt[1]->vinst; /* present */
+		coverage = (pgt[1]->size >> 3) << 16;
 	}

-	if (coverage <= 32 * 1024 * 1024)
-		phys |= 0x60;
-	else if (coverage <= 64 * 1024 * 1024)
-		phys |= 0x40;
-	else if (coverage < 128 * 1024 * 1024)
-		phys |= 0x20;
+	if (phys & 1) {
+		if (dev_priv->vram_sys_base) {
+			phys += dev_priv->vram_sys_base;
+			phys |= 0x30;
+		}
+
+		if (coverage <= 32 * 1024 * 1024)
+			phys |= 0x60;
+		else if (coverage <= 64 * 1024 * 1024)
+			phys |= 0x40;
+		else if (coverage < 128 * 1024 * 1024)
+			phys |= 0x20;
+	}

 	nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
 	nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
 }

-void
-nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde)
-{
-	nv_wo32(pgd, (pde * 8) + 0, 0x00000000);
-	nv_wo32(pgd, (pde * 8) + 4, 0xdeadcafe);
-}
-
 static inline u64
 nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	     u64 phys, u32 memtype, u32 target)
@@ -91,7 +92,8 @@ void
 nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	    struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
 {
-	u32 block, i;
+	u32 block;
+	int i;

 	phys = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0);
 	pte <<= 3;
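A worked example of the PDE word built by the rewritten nv50_vm_map_pgt() (my arithmetic from the code above, not normative): a small-page table at vinst 0x10000 holding 8192 eight-byte PTEs covers (65536 >> 3) << 12 = 32MiB, so

	phys = 0x10000 | 0x03 /* present, 4KiB pages */ | 0x60 /* <= 32MiB */;
	/* == 0x10063; written as 0x00010063 / 0x00000000 to the PDE halves */

and when neither pgt[0] nor pgt[1] exists the 0xdeadcafe00000000ULL sentinel is written instead, which also folds in the behaviour of the deleted nv50_vm_unmap_pgt().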
@@ -42,7 +42,7 @@ nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
 {
 	int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;

-	if (likely(type < sizeof(types) && types[type]))
+	if (likely(type < ARRAY_SIZE(types) && types[type]))
 		return true;
 	return false;
 }
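The one-liner above is the "sizeof() vs ARRAY_SIZE()" fix from the merge summary: sizeof() on an array yields its size in bytes, not elements. A generic illustration (not the driver's actual table):

	static const int types[4] = { 1, 0, 1, 1 };

	/* sizeof(types) == 16 bytes, so "type < sizeof(types)" admits
	 * indices 4..15 and reads past the array; ARRAY_SIZE(types) == 4
	 * is the intended bound (simplified from include/linux/kernel.h):
	 */
	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))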
drivers/gpu/drm/nouveau/nvc0_fbcon.c: new file (271 lines)

@@ -0,0 +1,271 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fbcon.h"
+#include "nouveau_mm.h"
+
+int
+nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
+
+	ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+	if (ret)
+		return ret;
+
+	if (rect->rop != ROP_COPY) {
+		BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+		OUT_RING (chan, 1);
+	}
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0588, 1);
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+		OUT_RING (chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+	else
+		OUT_RING (chan, rect->color);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0600, 4);
+	OUT_RING (chan, rect->dx);
+	OUT_RING (chan, rect->dy);
+	OUT_RING (chan, rect->dx + rect->width);
+	OUT_RING (chan, rect->dy + rect->height);
+	if (rect->rop != ROP_COPY) {
+		BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+		OUT_RING (chan, 3);
+	}
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
+
+	ret = RING_SPACE(chan, 12);
+	if (ret)
+		return ret;
+
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0110, 1);
+	OUT_RING (chan, 0);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x08b0, 4);
+	OUT_RING (chan, region->dx);
+	OUT_RING (chan, region->dy);
+	OUT_RING (chan, region->width);
+	OUT_RING (chan, region->height);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x08d0, 4);
+	OUT_RING (chan, 0);
+	OUT_RING (chan, region->sx);
+	OUT_RING (chan, 0);
+	OUT_RING (chan, region->sy);
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	uint32_t width, dwords, *data = (uint32_t *)image->data;
+	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
+	uint32_t *palette = info->pseudo_palette;
+	int ret;
+
+	if (image->depth != 1)
+		return -ENODEV;
+
+	ret = RING_SPACE(chan, 11);
+	if (ret)
+		return ret;
+
+	width = ALIGN(image->width, 32);
+	dwords = (width * image->height) >> 5;
+
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0814, 2);
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+		OUT_RING (chan, palette[image->bg_color] | mask);
+		OUT_RING (chan, palette[image->fg_color] | mask);
+	} else {
+		OUT_RING (chan, image->bg_color);
+		OUT_RING (chan, image->fg_color);
+	}
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0838, 2);
+	OUT_RING (chan, image->width);
+	OUT_RING (chan, image->height);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0850, 4);
+	OUT_RING (chan, 0);
+	OUT_RING (chan, image->dx);
+	OUT_RING (chan, 0);
+	OUT_RING (chan, image->dy);
+
+	while (dwords) {
+		int push = dwords > 2047 ? 2047 : dwords;
+
+		ret = RING_SPACE(chan, push + 1);
+		if (ret)
+			return ret;
+
+		dwords -= push;
+
+		BEGIN_NVC0(chan, 6, NvSub2D, 0x0860, push);
+		OUT_RINGp(chan, data, push);
+		data += push;
+	}
+
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nvc0_fbcon_accel_init(struct fb_info *info)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
+	int ret, format;
+
+	ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d);
+	if (ret)
+		return ret;
+
+	switch (info->var.bits_per_pixel) {
+	case 8:
+		format = 0xf3;
+		break;
+	case 15:
+		format = 0xf8;
+		break;
+	case 16:
+		format = 0xe8;
+		break;
+	case 32:
+		switch (info->var.transp.length) {
+		case 0: /* depth 24 */
+		case 8: /* depth 32, just use 24.. */
+			format = 0xe6;
+			break;
+		case 2: /* depth 30 */
+			format = 0xd1;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = RING_SPACE(chan, 60);
+	if (ret) {
+		WARN_ON(1);
+		nouveau_fbcon_gpu_lockup(info);
+		return ret;
+	}
+
+	printk(KERN_ERR "fb vma 0x%010llx\n", nvbo->vma.offset);
+
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1);
+	OUT_RING (chan, 0x0000902d);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2);
+	OUT_RING (chan, upper_32_bits(chan->notifier_bo->bo.offset));
+	OUT_RING (chan, lower_32_bits(chan->notifier_bo->bo.offset));
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1);
+	OUT_RING (chan, 0);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1);
+	OUT_RING (chan, 1);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+	OUT_RING (chan, 3);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x02a0, 1);
+	OUT_RING (chan, 0x55);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x08c0, 4);
+	OUT_RING (chan, 0);
+	OUT_RING (chan, 1);
+	OUT_RING (chan, 0);
+	OUT_RING (chan, 1);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0580, 2);
+	OUT_RING (chan, 4);
+	OUT_RING (chan, format);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x02e8, 2);
+	OUT_RING (chan, 2);
+	OUT_RING (chan, 1);
+
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0804, 1);
+	OUT_RING (chan, format);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0800, 1);
+	OUT_RING (chan, 1);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0808, 3);
+	OUT_RING (chan, 0);
+	OUT_RING (chan, 0);
+	OUT_RING (chan, 1);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x081c, 1);
+	OUT_RING (chan, 1);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0840, 4);
+	OUT_RING (chan, 0);
+	OUT_RING (chan, 1);
+	OUT_RING (chan, 0);
+	OUT_RING (chan, 1);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0200, 10);
+	OUT_RING (chan, format);
+	OUT_RING (chan, 1);
+	OUT_RING (chan, 0);
+	OUT_RING (chan, 1);
+	OUT_RING (chan, 0);
+	OUT_RING (chan, info->fix.line_length);
+	OUT_RING (chan, info->var.xres_virtual);
+	OUT_RING (chan, info->var.yres_virtual);
+	OUT_RING (chan, upper_32_bits(nvbo->vma.offset));
+	OUT_RING (chan, lower_32_bits(nvbo->vma.offset));
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10);
+	OUT_RING (chan, format);
+	OUT_RING (chan, 1);
+	OUT_RING (chan, 0);
+	OUT_RING (chan, 1);
+	OUT_RING (chan, 0);
+	OUT_RING (chan, info->fix.line_length);
+	OUT_RING (chan, info->var.xres_virtual);
+	OUT_RING (chan, info->var.yres_virtual);
+	OUT_RING (chan, upper_32_bits(nvbo->vma.offset));
+	OUT_RING (chan, lower_32_bits(nvbo->vma.offset));
+	FIRE_RING (chan);
+
+	return 0;
+}
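One detail of nvc0_fbcon_imageblit() worth spelling out is the mask computation. For a 16bpp framebuffer the arithmetic (mine, the intent an inference) works out as:

	/* ~(~0 >> (32 - 16)) == ~0x0000ffff == 0xffff0000 */

so OR-ing the mask into a palette entry forces every bit above the pixel depth to one before the colour reaches the 2D engine.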
@ -25,6 +25,48 @@
|
||||
#include "drmP.h"
|
||||
|
||||
#include "nouveau_drv.h"
|
||||
#include "nouveau_mm.h"
|
||||
|
||||
static void nvc0_fifo_isr(struct drm_device *);
|
||||
|
||||
struct nvc0_fifo_priv {
|
||||
struct nouveau_gpuobj *playlist[2];
|
||||
int cur_playlist;
|
||||
struct nouveau_vma user_vma;
|
||||
};
|
||||
|
||||
struct nvc0_fifo_chan {
|
||||
struct nouveau_bo *user;
|
||||
struct nouveau_gpuobj *ramfc;
|
||||
};

static void
nvc0_fifo_playlist_update(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nvc0_fifo_priv *priv = pfifo->priv;
	struct nouveau_gpuobj *cur;
	int i, p;

	cur = priv->playlist[priv->cur_playlist];
	priv->cur_playlist = !priv->cur_playlist;

	for (i = 0, p = 0; i < 128; i++) {
		if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
			continue;
		nv_wo32(cur, p + 0, i);
		nv_wo32(cur, p + 4, 0x00000004);
		p += 8;
	}
	pinstmem->flush(dev);

	nv_wr32(dev, 0x002270, cur->vinst >> 12);
	nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
	if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
		NV_ERROR(dev, "PFIFO - playlist update failed\n");
}

void
nvc0_fifo_disable(struct drm_device *dev)
@ -57,12 +99,135 @@ nvc0_fifo_channel_id(struct drm_device *dev)
int
nvc0_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nvc0_fifo_priv *priv = pfifo->priv;
	struct nvc0_fifo_chan *fifoch;
	u64 ib_virt, user_vinst;
	int ret;

	chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL);
	if (!chan->fifo_priv)
		return -ENOMEM;
	fifoch = chan->fifo_priv;

	/* allocate vram for control regs, map into polling area */
	ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM,
			     0, 0, true, true, &fifoch->user);
	if (ret)
		goto error;

	ret = nouveau_bo_pin(fifoch->user, TTM_PL_FLAG_VRAM);
	if (ret) {
		nouveau_bo_ref(NULL, &fifoch->user);
		goto error;
	}

	user_vinst = fifoch->user->bo.mem.start << PAGE_SHIFT;

	ret = nouveau_bo_map(fifoch->user);
	if (ret) {
		nouveau_bo_unpin(fifoch->user);
		nouveau_bo_ref(NULL, &fifoch->user);
		goto error;
	}

	nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
			  fifoch->user->bo.mem.mm_node);

	chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
				priv->user_vma.offset + (chan->id * 0x1000),
				PAGE_SIZE);
	if (!chan->user) {
		ret = -ENOMEM;
		goto error;
	}

	ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;

	/* zero channel regs */
	nouveau_bo_wr32(fifoch->user, 0x0040/4, 0);
	nouveau_bo_wr32(fifoch->user, 0x0044/4, 0);
	nouveau_bo_wr32(fifoch->user, 0x0048/4, 0);
	nouveau_bo_wr32(fifoch->user, 0x004c/4, 0);
	nouveau_bo_wr32(fifoch->user, 0x0050/4, 0);
	nouveau_bo_wr32(fifoch->user, 0x0058/4, 0);
	nouveau_bo_wr32(fifoch->user, 0x005c/4, 0);
	nouveau_bo_wr32(fifoch->user, 0x0060/4, 0);
	nouveau_bo_wr32(fifoch->user, 0x0088/4, 0);
	nouveau_bo_wr32(fifoch->user, 0x008c/4, 0);

	/* ramfc */
	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
				      chan->ramin->vinst, 0x100,
				      NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc);
	if (ret)
		goto error;

	nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(user_vinst));
	nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(user_vinst));
	nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
	nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
	nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
	nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
				     upper_32_bits(ib_virt));
	nv_wo32(fifoch->ramfc, 0x54, 0x00000002);
	nv_wo32(fifoch->ramfc, 0x84, 0x20400000);
	nv_wo32(fifoch->ramfc, 0x94, 0x30000001);
	nv_wo32(fifoch->ramfc, 0x9c, 0x00000100);
	nv_wo32(fifoch->ramfc, 0xa4, 0x1f1f1f1f);
	nv_wo32(fifoch->ramfc, 0xa8, 0x1f1f1f1f);
	nv_wo32(fifoch->ramfc, 0xac, 0x0000001f);
	nv_wo32(fifoch->ramfc, 0xb8, 0xf8000000);
	nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */
	nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */
	pinstmem->flush(dev);

	nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
					(chan->ramin->vinst >> 12));
	nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
	nvc0_fifo_playlist_update(dev);
	return 0;

error:
	pfifo->destroy_context(chan);
	return ret;
}
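
/*
 * Of the RAMFC contents written above, only a few fields are
 * understood: 0x08/0x0c hold the USER area address, 0x48/0x4c the GPU
 * virtual address of the indirect buffer, with the IB size encoded as
 * log2(entries) in bits 19:16 of 0x4c.  The remaining magic numbers
 * presumably come from traces of the binary driver.
 */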

void
nvc0_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nvc0_fifo_chan *fifoch;

	nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
	nv_wr32(dev, 0x002634, chan->id);
	if (!nv_wait(dev, 0x002634, 0xffffffff, chan->id))
		NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));

	nvc0_fifo_playlist_update(dev);

	nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);

	if (chan->user) {
		iounmap(chan->user);
		chan->user = NULL;
	}

	fifoch = chan->fifo_priv;
	chan->fifo_priv = NULL;
	if (!fifoch)
		return;

	nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
	if (fifoch->user) {
		nouveau_bo_unmap(fifoch->user);
		nouveau_bo_unpin(fifoch->user);
		nouveau_bo_ref(NULL, &fifoch->user);
	}
	kfree(fifoch);
}

int
@ -77,14 +242,208 @@ nvc0_fifo_unload_context(struct drm_device *dev)
	return 0;
}

static void
nvc0_fifo_destroy(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nvc0_fifo_priv *priv;

	priv = pfifo->priv;
	if (!priv)
		return;

	nouveau_vm_put(&priv->user_vma);
	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
	kfree(priv);
}

void
nvc0_fifo_takedown(struct drm_device *dev)
{
	nv_wr32(dev, 0x002140, 0x00000000);
	nvc0_fifo_destroy(dev);
}

static int
nvc0_fifo_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nvc0_fifo_priv *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	pfifo->priv = priv;

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
				 &priv->playlist[0]);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
				 &priv->playlist[1]);
	if (ret)
		goto error;

	ret = nouveau_vm_get(dev_priv->bar1_vm, pfifo->channels * 0x1000,
			     12, NV_MEM_ACCESS_RW, &priv->user_vma);
	if (ret)
		goto error;

	nouveau_irq_register(dev, 8, nvc0_fifo_isr);
	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
	return 0;

error:
	nvc0_fifo_destroy(dev);
	return ret;
}
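
/*
 * user_vma is a BAR1 window with one 4KiB page per channel: each
 * channel's control-register buffer is mapped at chid * 0x1000 inside
 * it and then ioremapped by nvc0_fifo_create_context() above.
 */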

int
nvc0_fifo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nvc0_fifo_priv *priv;
	int ret, i;

	if (!pfifo->priv) {
		ret = nvc0_fifo_create(dev);
		if (ret)
			return ret;
	}
	priv = pfifo->priv;

	/* reset PFIFO, enable all available PSUBFIFO areas */
	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
	nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
	nv_wr32(dev, 0x000204, 0xffffffff);
	nv_wr32(dev, 0x002204, 0xffffffff);

	/* assign engines to subfifos */
	nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
	nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
	nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
	nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
	nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
	nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */

	/* PSUBFIFO[n] */
	for (i = 0; i < 3; i++) {
		nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
	}

	nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
	nv_wr32(dev, 0x002254, 0x10000000 | priv->user_vma.offset >> 12);

	nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
	nv_wr32(dev, 0x002100, 0xffffffff);
	nv_wr32(dev, 0x002140, 0xbfffffff);
	return 0;
}

struct nouveau_enum nvc0_fifo_fault_unit[] = {
	{ 0, "PGRAPH" },
	{ 3, "PEEPHOLE" },
	{ 4, "BAR1" },
	{ 5, "BAR3" },
	{ 7, "PFIFO" },
	{}
};

struct nouveau_enum nvc0_fifo_fault_reason[] = {
	{ 0, "PT_NOT_PRESENT" },
	{ 1, "PT_TOO_SHORT" },
	{ 2, "PAGE_NOT_PRESENT" },
	{ 3, "VM_LIMIT_EXCEEDED" },
	{}
};

struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
	/* { 0x00008000, "" } seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};

static void
nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
{
	u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
	u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
	u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
	u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));

	NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
		(stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
	nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
	printk("] from ");
	nouveau_enum_print(nvc0_fifo_fault_unit, unit);
	printk(" on channel 0x%010llx\n", (u64)inst << 12);
}

static void
nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
{
	u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
	u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
	u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
	u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);

	NV_INFO(dev, "PSUBFIFO %d:", unit);
	nouveau_bitfield_print(nvc0_fifo_subfifo_intr, stat);
	NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
		unit, chid, subc, mthd, data);

	nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
	nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
}

static void
nvc0_fifo_isr(struct drm_device *dev)
{
	u32 stat = nv_rd32(dev, 0x002100);

	if (stat & 0x10000000) {
		u32 units = nv_rd32(dev, 0x00259c);
		u32 u = units;

		while (u) {
			int i = ffs(u) - 1;
			nvc0_fifo_isr_vm_fault(dev, i);
			u &= ~(1 << i);
		}

		nv_wr32(dev, 0x00259c, units);
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 units = nv_rd32(dev, 0x0025a0);
		u32 u = units;

		while (u) {
			int i = ffs(u) - 1;
			nvc0_fifo_isr_subfifo_intr(dev, i);
			u &= ~(1 << i);
		}

		nv_wr32(dev, 0x0025a0, units);
		stat &= ~0x20000000;
	}

	if (stat) {
		NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
		nv_wr32(dev, 0x002100, stat);
	}

	nv_wr32(dev, 0x2140, 0);
}
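
/*
 * Note that the ISR ends by writing 0 to 0x2140, which masks all
 * further PFIFO interrupts after the first one is serviced; this looks
 * like a leftover debugging safety net rather than intended behaviour.
 */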

drivers/gpu/drm/nouveau/nvc0_graph.c
@ -22,9 +22,16 @@
 * Authors: Ben Skeggs
 */

#include <linux/firmware.h>

#include "drmP.h"

#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nvc0_graph.h"

static void nvc0_graph_isr(struct drm_device *);
static int nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan);

void
nvc0_graph_fifo_access(struct drm_device *dev, bool enabled)
@ -37,39 +44,739 @@ nvc0_graph_channel(struct drm_device *dev)
	return NULL;
}

static int
nvc0_graph_construct_context(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
	struct nvc0_graph_chan *grch = chan->pgraph_ctx;
	struct drm_device *dev = chan->dev;
	int ret, i;
	u32 *ctx;

	ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	nvc0_graph_load_context(chan);

	nv_wo32(grch->grctx, 0x1c, 1);
	nv_wo32(grch->grctx, 0x20, 0);
	nv_wo32(grch->grctx, 0x28, 0);
	nv_wo32(grch->grctx, 0x2c, 0);
	dev_priv->engine.instmem.flush(dev);

	ret = nvc0_grctx_generate(chan);
	if (ret) {
		kfree(ctx);
		return ret;
	}

	ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
	if (ret) {
		kfree(ctx);
		return ret;
	}

	for (i = 0; i < priv->grctx_size; i += 4)
		ctx[i / 4] = nv_ro32(grch->grctx, i);

	priv->grctx_vals = ctx;
	return 0;
}
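
/*
 * This runs once, on the first channel: a context is generated from
 * scratch, unloaded back into the channel's grctx object, and copied
 * out as a "golden" image, so later channels can be initialised from
 * the saved values without running nvc0_grctx_generate() again.
 */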

static int
nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
	struct nvc0_graph_chan *grch = chan->pgraph_ctx;
	struct drm_device *dev = chan->dev;
	int i = 0, gpc, tp, ret;
	u32 magic;

	ret = nouveau_gpuobj_new(dev, NULL, 0x2000, 256, NVOBJ_FLAG_VM,
				 &grch->unk408004);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, NVOBJ_FLAG_VM,
				 &grch->unk40800c);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, NVOBJ_FLAG_VM,
				 &grch->unk418810);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, NVOBJ_FLAG_VM,
				 &grch->mmio);
	if (ret)
		return ret;

	nv_wo32(grch->mmio, i++ * 4, 0x00408004);
	nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x00408008);
	nv_wo32(grch->mmio, i++ * 4, 0x80000018);

	nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x00408010);
	nv_wo32(grch->mmio, i++ * 4, 0x80000000);

	nv_wo32(grch->mmio, i++ * 4, 0x00418810);
	nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->vinst >> 12);
	nv_wo32(grch->mmio, i++ * 4, 0x00419848);
	nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->vinst >> 12);

	nv_wo32(grch->mmio, i++ * 4, 0x00419004);
	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x00419008);
	nv_wo32(grch->mmio, i++ * 4, 0x00000000);

	nv_wo32(grch->mmio, i++ * 4, 0x00418808);
	nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
	nv_wo32(grch->mmio, i++ * 4, 0x80000018);

	magic = 0x02180000;
	nv_wo32(grch->mmio, i++ * 4, 0x00405830);
	nv_wo32(grch->mmio, i++ * 4, magic);
	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x02fc) {
			u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
			nv_wo32(grch->mmio, i++ * 4, reg);
			nv_wo32(grch->mmio, i++ * 4, magic);
		}
	}

	grch->mmio_nr = i / 2;
	return 0;
}
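
/*
 * The buffer built above is a list of (register, value) pairs, hence
 * mmio_nr = i / 2; it is handed to the context-control microcode via
 * the 0x10/0x14/0x18 words of the channel's grctx in
 * nvc0_graph_create_context() below.
 */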

int
nvc0_graph_create_context(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nvc0_graph_priv *priv = pgraph->priv;
	struct nvc0_graph_chan *grch;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *grctx;
	int ret, i;

	chan->pgraph_ctx = kzalloc(sizeof(*grch), GFP_KERNEL);
	if (!chan->pgraph_ctx)
		return -ENOMEM;
	grch = chan->pgraph_ctx;

	ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
				 &grch->grctx);
	if (ret)
		goto error;
	chan->ramin_grctx = grch->grctx;
	grctx = grch->grctx;

	ret = nvc0_graph_create_context_mmio_list(chan);
	if (ret)
		goto error;

	nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->vinst) | 4);
	nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->vinst));
	pinstmem->flush(dev);

	if (!priv->grctx_vals) {
		ret = nvc0_graph_construct_context(chan);
		if (ret)
			goto error;
	}

	for (i = 0; i < priv->grctx_size; i += 4)
		nv_wo32(grctx, i, priv->grctx_vals[i / 4]);

	nv_wo32(grctx, 0xf4, 0);
	nv_wo32(grctx, 0xf8, 0);
	nv_wo32(grctx, 0x10, grch->mmio_nr);
	nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst));
	nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst));
	nv_wo32(grctx, 0x1c, 1);
	nv_wo32(grctx, 0x20, 0);
	nv_wo32(grctx, 0x28, 0);
	nv_wo32(grctx, 0x2c, 0);
	pinstmem->flush(dev);
	return 0;

error:
	pgraph->destroy_context(chan);
	return ret;
}

void
nvc0_graph_destroy_context(struct nouveau_channel *chan)
{
	struct nvc0_graph_chan *grch;

	grch = chan->pgraph_ctx;
	chan->pgraph_ctx = NULL;
	if (!grch)
		return;

	nouveau_gpuobj_ref(NULL, &grch->mmio);
	nouveau_gpuobj_ref(NULL, &grch->unk418810);
	nouveau_gpuobj_ref(NULL, &grch->unk40800c);
	nouveau_gpuobj_ref(NULL, &grch->unk408004);
	nouveau_gpuobj_ref(NULL, &grch->grctx);
	chan->ramin_grctx = NULL;
}

int
nvc0_graph_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;

	nv_wr32(dev, 0x409840, 0x00000030);
	nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
	nv_wr32(dev, 0x409504, 0x00000003);
	if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
		NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");

	NV_DEBUG(dev, "load_ctx 0x%08x\n", nv_rd32(dev, 0x409b00));
	return 0;
}

static int
nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
{
	nv_wr32(dev, 0x409840, 0x00000003);
	nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
	nv_wr32(dev, 0x409504, 0x00000009);
	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
		return -EBUSY;
	}

	return 0;
}

int
nvc0_graph_unload_context(struct drm_device *dev)
{
	u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
	return nvc0_graph_unload_context_to(dev, inst);
}

static void
nvc0_graph_destroy(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nvc0_graph_priv *priv;

	priv = pgraph->priv;
	if (!priv)
		return;

	nouveau_irq_unregister(dev, 12);

	nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
	nouveau_gpuobj_ref(NULL, &priv->unk4188b4);

	kfree(priv->grctx_vals);
	kfree(priv);
}

void
nvc0_graph_takedown(struct drm_device *dev)
{
	nvc0_graph_destroy(dev);
}

static int
nvc0_graph_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nvc0_graph_priv *priv;
	int ret, gpc, i;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	pgraph->priv = priv;

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
	if (ret)
		goto error;

	for (i = 0; i < 0x1000; i += 4) {
		nv_wo32(priv->unk4188b4, i, 0x00000010);
		nv_wo32(priv->unk4188b8, i, 0x00000010);
	}

	priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
	priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
		priv->tp_total += priv->tp_nr[gpc];
	}

	/*XXX: these need figuring out... */
	switch (dev_priv->chipset) {
	case 0xc0:
		if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
			priv->magic_not_rop_nr = 0x07;
			priv->magic419bd0 = 0x0a360000;
			priv->magic419be4 = 0x04c33a54;
			/* filled values up to tp_total, the rest 0 */
			priv->magicgpc980[0] = 0x22111000;
			priv->magicgpc980[1] = 0x00000233;
			priv->magicgpc980[2] = 0x00000000;
			priv->magicgpc980[3] = 0x00000000;
			priv->magicgpc918 = 0x000ba2e9;
		} else
		if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
			priv->magic_not_rop_nr = 0x05;
			priv->magic419bd0 = 0x043c0000;
			priv->magic419be4 = 0x09041208;
			priv->magicgpc980[0] = 0x11110000;
			priv->magicgpc980[1] = 0x00233222;
			priv->magicgpc980[2] = 0x00000000;
			priv->magicgpc980[3] = 0x00000000;
			priv->magicgpc918 = 0x00092493;
		} else
		if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
			priv->magic_not_rop_nr = 0x06;
			priv->magic419bd0 = 0x023e0000;
			priv->magic419be4 = 0x10414104;
			priv->magicgpc980[0] = 0x11110000;
			priv->magicgpc980[1] = 0x03332222;
			priv->magicgpc980[2] = 0x00000000;
			priv->magicgpc980[3] = 0x00000000;
			priv->magicgpc918 = 0x00088889;
		}
		break;
	case 0xc3: /* 450, 4/0/0/0, 2 */
		priv->magic_not_rop_nr = 0x03;
		priv->magic419bd0 = 0x00500000;
		priv->magic419be4 = 0x00000000;
		priv->magicgpc980[0] = 0x00003210;
		priv->magicgpc980[1] = 0x00000000;
		priv->magicgpc980[2] = 0x00000000;
		priv->magicgpc980[3] = 0x00000000;
		priv->magicgpc918 = 0x00200000;
		break;
	case 0xc4: /* 460, 3/4/0/0, 4 */
		priv->magic_not_rop_nr = 0x01;
		priv->magic419bd0 = 0x045c0000;
		priv->magic419be4 = 0x09041208;
		priv->magicgpc980[0] = 0x02321100;
		priv->magicgpc980[1] = 0x00000000;
		priv->magicgpc980[2] = 0x00000000;
		priv->magicgpc980[3] = 0x00000000;
		priv->magicgpc918 = 0x00124925;
		break;
	}

	if (!priv->magic419bd0) {
		NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
			 priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
			 priv->tp_nr[3], priv->rop_nr);
		/* use 0xc3's values... */
		priv->magic_not_rop_nr = 0x03;
		priv->magic419bd0 = 0x00500000;
		priv->magic419be4 = 0x00000000;
		priv->magicgpc980[0] = 0x00003210;
		priv->magicgpc980[1] = 0x00000000;
		priv->magicgpc980[2] = 0x00000000;
		priv->magicgpc980[3] = 0x00000000;
		priv->magicgpc918 = 0x00200000;
	}

	nouveau_irq_register(dev, 12, nvc0_graph_isr);
	NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
	NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
	NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
	NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
	return 0;

error:
	nvc0_graph_destroy(dev);
	return ret;
}

static void
nvc0_graph_init_obj418880(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nvc0_graph_priv *priv = pgraph->priv;
	int i;

	nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
	nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
	nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
	nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
}

static void
nvc0_graph_init_regs(struct drm_device *dev)
{
	nv_wr32(dev, 0x400080, 0x003083c2);
	nv_wr32(dev, 0x400088, 0x00006fe7);
	nv_wr32(dev, 0x40008c, 0x00000000);
	nv_wr32(dev, 0x400090, 0x00000030);
	nv_wr32(dev, 0x40013c, 0x013901f7);
	nv_wr32(dev, 0x400140, 0x00000100);
	nv_wr32(dev, 0x400144, 0x00000000);
	nv_wr32(dev, 0x400148, 0x00000110);
	nv_wr32(dev, 0x400138, 0x00000000);
	nv_wr32(dev, 0x400130, 0x00000000);
	nv_wr32(dev, 0x400134, 0x00000000);
	nv_wr32(dev, 0x400124, 0x00000002);
}

static void
nvc0_graph_init_gpc_0(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
	int gpc;

	//      TP      ROP UNKVAL(magic_not_rop_nr)
	// 450: 4/0/0/0 2   3
	// 460: 3/4/0/0 4   1
	// 465: 3/4/4/0 4   7
	// 470: 3/3/4/4 5   5
	// 480: 3/4/4/4 6   6

	// magicgpc918
	// 450: 00200000 00000000001000000000000000000000
	// 460: 00124925 00000000000100100100100100100101
	// 465: 000ba2e9 00000000000010111010001011101001
	// 470: 00092493 00000000000010010010010010010011
	// 480: 00088889 00000000000010001000100010001001

	/* filled values up to tp_total, remainder 0 */
	// 450: 00003210 00000000 00000000 00000000
	// 460: 02321100 00000000 00000000 00000000
	// 465: 22111000 00000233 00000000 00000000
	// 470: 11110000 00233222 00000000 00000000
	// 480: 11110000 03332222 00000000 00000000

	nv_wr32(dev, GPC_BCAST(0x0980), priv->magicgpc980[0]);
	nv_wr32(dev, GPC_BCAST(0x0984), priv->magicgpc980[1]);
	nv_wr32(dev, GPC_BCAST(0x0988), priv->magicgpc980[2]);
	nv_wr32(dev, GPC_BCAST(0x098c), priv->magicgpc980[3]);

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
						    priv->tp_nr[gpc]);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0918), priv->magicgpc918);
	}

	nv_wr32(dev, GPC_BCAST(0x1bd4), priv->magicgpc918);
	nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr);
}

static void
nvc0_graph_init_units(struct drm_device *dev)
{
	nv_wr32(dev, 0x409c24, 0x000f0000);
	nv_wr32(dev, 0x404000, 0xc0000000); /* DISPATCH */
	nv_wr32(dev, 0x404600, 0xc0000000); /* M2MF */
	nv_wr32(dev, 0x408030, 0xc0000000);
	nv_wr32(dev, 0x40601c, 0xc0000000);
	nv_wr32(dev, 0x404490, 0xc0000000); /* MACRO */
	nv_wr32(dev, 0x406018, 0xc0000000);
	nv_wr32(dev, 0x405840, 0xc0000000);
	nv_wr32(dev, 0x405844, 0x00ffffff);
	nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
	nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
}

static void
nvc0_graph_init_gpc_1(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
	int gpc, tp;

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x508), 0xffffffff);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x50c), 0xffffffff);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0xe44), 0x001ffffe);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0xe4c), 0x0000000f);
		}
		nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
		nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
	}
}

static void
nvc0_graph_init_rop(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
	int rop;

	for (rop = 0; rop < priv->rop_nr; rop++) {
		nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
		nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
		nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
		nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
	}
}

static int
nvc0_fuc_load_fw(struct drm_device *dev, u32 fuc_base,
		 const char *code_fw, const char *data_fw)
{
	const struct firmware *fw;
	char name[32];
	int ret, i;

	snprintf(name, sizeof(name), "nouveau/%s", data_fw);
	ret = request_firmware(&fw, name, &dev->pdev->dev);
	if (ret) {
		NV_ERROR(dev, "failed to load %s\n", data_fw);
		return ret;
	}

	nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
	for (i = 0; i < fw->size / 4; i++)
		nv_wr32(dev, fuc_base + 0x01c4, ((u32 *)fw->data)[i]);
	release_firmware(fw);

	snprintf(name, sizeof(name), "nouveau/%s", code_fw);
	ret = request_firmware(&fw, name, &dev->pdev->dev);
	if (ret) {
		NV_ERROR(dev, "failed to load %s\n", code_fw);
		return ret;
	}

	nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
	for (i = 0; i < fw->size / 4; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(dev, fuc_base + 0x0188, i >> 6);
		nv_wr32(dev, fuc_base + 0x0184, ((u32 *)fw->data)[i]);
	}
	release_firmware(fw);

	return 0;
}
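
/*
 * Upload protocol, as far as it can be inferred from the accesses
 * above: the data segment streams in through fuc_base + 0x1c4 after
 * arming auto-increment via 0x1c0, while the code segment goes through
 * 0x184, with 0x188 selecting the destination 256-byte page every 64
 * words.
 */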

static int
nvc0_graph_init_ctxctl(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
	u32 r000260;
	int ret;

	/* load fuc microcode */
	r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
	ret = nvc0_fuc_load_fw(dev, 0x409000, "fuc409c", "fuc409d");
	if (ret == 0)
		nvc0_fuc_load_fw(dev, 0x41a000, "fuc41ac", "fuc41ad");
	nv_wr32(dev, 0x000260, r000260);

	if (ret)
		return ret;

	/* start both of them running */
	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x41a10c, 0x00000000);
	nv_wr32(dev, 0x40910c, 0x00000000);
	nv_wr32(dev, 0x41a100, 0x00000002);
	nv_wr32(dev, 0x409100, 0x00000002);
	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
		NV_INFO(dev, "0x409800 wait failed\n");

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x7fffffff);
	nv_wr32(dev, 0x409504, 0x00000021);

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x00000000);
	nv_wr32(dev, 0x409504, 0x00000010);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
		return -EBUSY;
	}
	priv->grctx_size = nv_rd32(dev, 0x409800);

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x00000000);
	nv_wr32(dev, 0x409504, 0x00000016);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
		return -EBUSY;
	}

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x00000000);
	nv_wr32(dev, 0x409504, 0x00000025);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
		return -EBUSY;
	}

	return 0;
}
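
/*
 * The 0x409504 writes are requests to the now-running fuc09 microcode,
 * acknowledged through 0x409800: request 0x10 returns the required
 * context size, while the 0x16 and 0x25 steps are not yet understood.
 */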

int
nvc0_graph_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nvc0_graph_priv *priv;
	int ret;

	dev_priv->engine.graph.accel_blocked = true;

	switch (dev_priv->chipset) {
	case 0xc0:
	case 0xc3:
	case 0xc4:
		break;
	default:
		NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
		if (nouveau_noaccel != 0)
			return 0;
		break;
	}

	nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
	nv_mask(dev, 0x000200, 0x18001000, 0x18001000);

	if (!pgraph->priv) {
		ret = nvc0_graph_create(dev);
		if (ret)
			return ret;
	}
	priv = pgraph->priv;

	nvc0_graph_init_obj418880(dev);
	nvc0_graph_init_regs(dev);
	//nvc0_graph_init_unimplemented_magics(dev);
	nvc0_graph_init_gpc_0(dev);
	//nvc0_graph_init_unimplemented_c242(dev);

	nv_wr32(dev, 0x400500, 0x00010001);
	nv_wr32(dev, 0x400100, 0xffffffff);
	nv_wr32(dev, 0x40013c, 0xffffffff);

	nvc0_graph_init_units(dev);
	nvc0_graph_init_gpc_1(dev);
	nvc0_graph_init_rop(dev);

	nv_wr32(dev, 0x400108, 0xffffffff);
	nv_wr32(dev, 0x400138, 0xffffffff);
	nv_wr32(dev, 0x400118, 0xffffffff);
	nv_wr32(dev, 0x400130, 0xffffffff);
	nv_wr32(dev, 0x40011c, 0xffffffff);
	nv_wr32(dev, 0x400134, 0xffffffff);
	nv_wr32(dev, 0x400054, 0x34ce3464);

	ret = nvc0_graph_init_ctxctl(dev);
	if (ret)
		return ret;

	dev_priv->engine.graph.accel_blocked = false;
	return 0;
}

static struct nouveau_enum nvc0_graph_data_error[] = {
	{ 5, "INVALID_ENUM" },
	{}
};

static int
nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		chan = dev_priv->channels.ptr[i];
		if (!chan || !chan->ramin)
			continue;

		if (inst == chan->ramin->vinst)
			break;
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return i;
}

static void
nvc0_graph_isr(struct drm_device *dev)
{
	u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
	u32 chid = nvc0_graph_isr_chid(dev, inst);
	u32 stat = nv_rd32(dev, 0x400100);
	u32 addr = nv_rd32(dev, 0x400704);
	u32 mthd = (addr & 0x00003ffc);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 data = nv_rd32(dev, 0x400708);
	u32 code = nv_rd32(dev, 0x400110);
	u32 class = nv_rd32(dev, 0x404200 + (subc * 4));

	if (stat & 0x00000010) {
		NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] subc %d "
			     "class 0x%04x mthd 0x%04x data 0x%08x\n",
			chid, inst, subc, class, mthd, data);
		nv_wr32(dev, 0x400100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00100000) {
		NV_INFO(dev, "PGRAPH: DATA_ERROR [");
		nouveau_enum_print(nvc0_graph_data_error, code);
		printk("] ch %d [0x%010llx] subc %d class 0x%04x "
		       "mthd 0x%04x data 0x%08x\n",
		       chid, inst, subc, class, mthd, data);
		nv_wr32(dev, 0x400100, 0x00100000);
		stat &= ~0x00100000;
	}

	if (stat & 0x00080000) {
		u32 ustat = nv_rd32(dev, 0x409c18);

		NV_INFO(dev, "PGRAPH: CTXCTRL ustat 0x%08x\n", ustat);

		nv_wr32(dev, 0x409c20, ustat);
		nv_wr32(dev, 0x400100, 0x00080000);
		stat &= ~0x00080000;
	}

	if (stat) {
		NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
		nv_wr32(dev, 0x400100, stat);
	}

	nv_wr32(dev, 0x400500, 0x00010001);
}

drivers/gpu/drm/nouveau/nvc0_graph.h (new file, 66 lines)
@ -0,0 +1,66 @@
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#ifndef __NVC0_GRAPH_H__
#define __NVC0_GRAPH_H__

#define GPC_MAX 4
#define TP_MAX 32

#define ROP_BCAST(r)   (0x408800 + (r))
#define ROP_UNIT(u,r)  (0x410000 + (u) * 0x400 + (r))
#define GPC_BCAST(r)   (0x418000 + (r))
#define GPC_UNIT(t,r)  (0x500000 + (t) * 0x8000 + (r))
#define TP_UNIT(t,m,r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))

struct nvc0_graph_priv {
	u8 gpc_nr;
	u8 rop_nr;
	u8 tp_nr[GPC_MAX];
	u8 tp_total;

	u32 grctx_size;
	u32 *grctx_vals;
	struct nouveau_gpuobj *unk4188b4;
	struct nouveau_gpuobj *unk4188b8;

	u8  magic_not_rop_nr;
	u32 magic419bd0;
	u32 magic419be4;
	u32 magicgpc980[4];
	u32 magicgpc918;
};

struct nvc0_graph_chan {
	struct nouveau_gpuobj *grctx;
	struct nouveau_gpuobj *unk408004; // 0x418810 too
	struct nouveau_gpuobj *unk40800c; // 0x419004 too
	struct nouveau_gpuobj *unk418810; // 0x419848 too
	struct nouveau_gpuobj *mmio;
	int mmio_nr;
};

int nvc0_grctx_generate(struct nouveau_channel *);

#endif

drivers/gpu/drm/nouveau/nvc0_grctx.c (new file, 2854 lines; diff suppressed because it is too large)

drivers/gpu/drm/nouveau/nvc0_instmem.c
@ -25,159 +25,22 @@
#include "drmP.h"

#include "nouveau_drv.h"
#include "nouveau_vm.h"

struct nvc0_gpuobj_node {
	struct nouveau_bo *vram;
	struct drm_mm_node *ramin;
	u32 align;
};

struct nvc0_instmem_priv {
	struct nouveau_gpuobj *bar1_pgd;
	struct nouveau_channel *bar1;
	struct nouveau_gpuobj *bar3_pgd;
	struct nouveau_channel *bar3;
	struct nouveau_gpuobj *chan_pgd;
};

int
nvc0_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
{
	struct drm_device *dev = gpuobj->dev;
	struct nvc0_gpuobj_node *node = NULL;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	node->align = align;

	ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM,
			     0, 0x0000, true, false, &node->vram);
	if (ret) {
		NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
		return ret;
	}

	ret = nouveau_bo_pin(node->vram, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
		nouveau_bo_ref(NULL, &node->vram);
		return ret;
	}

	gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT;
	gpuobj->size = node->vram->bo.mem.num_pages << PAGE_SHIFT;
	gpuobj->node = node;
	return 0;
}

void
nvc0_instmem_put(struct nouveau_gpuobj *gpuobj)
{
	struct nvc0_gpuobj_node *node;

	node = gpuobj->node;
	gpuobj->node = NULL;

	nouveau_bo_unpin(node->vram);
	nouveau_bo_ref(NULL, &node->vram);
	kfree(node);
}

int
nvc0_instmem_map(struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct nvc0_gpuobj_node *node = gpuobj->node;
	struct drm_device *dev = gpuobj->dev;
	struct drm_mm_node *ramin = NULL;
	u32 pte, pte_end;
	u64 vram;

	do {
		if (drm_mm_pre_get(&dev_priv->ramin_heap))
			return -ENOMEM;

		spin_lock(&dev_priv->ramin_lock);
		ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size,
					   node->align, 0);
		if (ramin == NULL) {
			spin_unlock(&dev_priv->ramin_lock);
			return -ENOMEM;
		}

		ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align);
		spin_unlock(&dev_priv->ramin_lock);
	} while (ramin == NULL);

	pte = (ramin->start >> 12) << 1;
	pte_end = ((ramin->size >> 12) << 1) + pte;
	vram = gpuobj->vinst;

	NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
		 ramin->start, pte, pte_end);
	NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);

	while (pte < pte_end) {
		nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1);
		nv_wr32(dev, 0x702004 + (pte * 8), 0);
		vram += 4096;
		pte++;
	}
	dev_priv->engine.instmem.flush(dev);

	if (1) {
		u32 chan = nv_rd32(dev, 0x1700) << 16;
		nv_wr32(dev, 0x100cb8, (chan + 0x1000) >> 8);
		nv_wr32(dev, 0x100cbc, 0x80000005);
	}

	node->ramin = ramin;
	gpuobj->pinst = ramin->start;
	return 0;
}

void
nvc0_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct nvc0_gpuobj_node *node = gpuobj->node;
	u32 pte, pte_end;

	if (!node->ramin || !dev_priv->ramin_available)
		return;

	pte = (node->ramin->start >> 12) << 1;
	pte_end = ((node->ramin->size >> 12) << 1) + pte;

	while (pte < pte_end) {
		nv_wr32(gpuobj->dev, 0x702000 + (pte * 8), 0);
		nv_wr32(gpuobj->dev, 0x702004 + (pte * 8), 0);
		pte++;
	}
	dev_priv->engine.instmem.flush(gpuobj->dev);

	spin_lock(&dev_priv->ramin_lock);
	drm_mm_put_block(node->ramin);
	node->ramin = NULL;
	spin_unlock(&dev_priv->ramin_lock);
}

void
nvc0_instmem_flush(struct drm_device *dev)
{
	nv_wr32(dev, 0x070000, 1);
	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
		NV_ERROR(dev, "PRAMIN flush timeout\n");
}

int
nvc0_instmem_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 *buf;
	int i;

	dev_priv->susres.ramin_copy = vmalloc(65536);
	if (!dev_priv->susres.ramin_copy)
		return -ENOMEM;
	buf = dev_priv->susres.ramin_copy;

	for (i = 0; i < 65536; i += 4)
		buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
	dev_priv->ramin_available = false;
	return 0;
}

@ -185,73 +48,184 @@ void
nvc0_instmem_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 *buf = dev_priv->susres.ramin_copy;
	u64 chan;
	int i;
	struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;

	chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
	nv_wr32(dev, 0x001700, chan >> 16);
	nv_mask(dev, 0x100c80, 0x00000001, 0x00000000);
	nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12);
	nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12);
	dev_priv->ramin_available = true;
}

	for (i = 0; i < 65536; i += 4)
		nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
	vfree(dev_priv->susres.ramin_copy);
	dev_priv->susres.ramin_copy = NULL;
static void
nvc0_channel_del(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan;

	nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
	chan = *pchan;
	*pchan = NULL;
	if (!chan)
		return;

	nouveau_vm_ref(NULL, &chan->vm, NULL);
	if (chan->ramin_heap.free_stack.next)
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
	kfree(chan);
}

static int
nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
		 struct nouveau_channel **pchan,
		 struct nouveau_gpuobj *pgd, u64 vm_size)
{
	struct nouveau_channel *chan;
	int ret;

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		nvc0_channel_del(&chan);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
	if (ret) {
		nvc0_channel_del(&chan);
		return ret;
	}

	ret = nouveau_vm_ref(vm, &chan->vm, NULL);
	if (ret) {
		nvc0_channel_del(&chan);
		return ret;
	}

	nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
	nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
	nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
	nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));

	*pchan = chan;
	return 0;
}
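
/*
 * Only four words of the instance block matter for these dummy
 * channels: 0x200/0x204 hold the page directory address and
 * 0x208/0x20c the VM limit.  Suballocations from the block start at
 * 0x1000, as set up by the ramin_heap init above.
 */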

int
nvc0_instmem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u64 chan, pgt3, imem, lim3 = dev_priv->ramin_size - 1;
	int ret, i;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct pci_dev *pdev = dev->pdev;
	struct nvc0_instmem_priv *priv;
	struct nouveau_vm *vm = NULL;
	int ret;

	dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
	chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
	imem = 4096 + 4096 + 32768;

	nv_wr32(dev, 0x001700, chan >> 16);

	/* channel setup */
	nv_wr32(dev, 0x700200, lower_32_bits(chan + 0x1000));
	nv_wr32(dev, 0x700204, upper_32_bits(chan + 0x1000));
	nv_wr32(dev, 0x700208, lower_32_bits(lim3));
	nv_wr32(dev, 0x70020c, upper_32_bits(lim3));

	/* point pgd -> pgt */
	nv_wr32(dev, 0x701000, 0);
	nv_wr32(dev, 0x701004, ((chan + 0x2000) >> 8) | 1);

	/* point pgt -> physical vram for channel */
	pgt3 = 0x2000;
	for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4096, pgt3 += 8) {
		nv_wr32(dev, 0x700000 + pgt3, ((chan + i) >> 8) | 1);
		nv_wr32(dev, 0x700004 + pgt3, 0);
	}

	/* clear rest of pgt */
	for (; i < dev_priv->ramin_size; i += 4096, pgt3 += 8) {
		nv_wr32(dev, 0x700000 + pgt3, 0);
		nv_wr32(dev, 0x700004 + pgt3, 0);
	}

	/* point bar3 at the channel */
	nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));

	/* Global PRAMIN heap */
	ret = drm_mm_init(&dev_priv->ramin_heap, imem,
			  dev_priv->ramin_size - imem);
	if (ret) {
		NV_ERROR(dev, "Failed to init RAMIN heap\n");
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	}
	pinstmem->priv = priv;

	/* BAR3 VM */
	ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0,
			     &dev_priv->bar3_vm);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL,
				 (pci_resource_len(pdev, 3) >> 12) * 8, 0,
				 NVOBJ_FLAG_DONT_MAP |
				 NVOBJ_FLAG_ZERO_ALLOC,
				 &dev_priv->bar3_vm->pgt[0].obj[0]);
	if (ret)
		goto error;
	dev_priv->bar3_vm->pgt[0].refcount[0] = 1;

	nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);

	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd);
	if (ret)
		goto error;

	ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd);
	if (ret)
		goto error;
	nouveau_vm_ref(NULL, &vm, NULL);

	ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3,
			       priv->bar3_pgd, pci_resource_len(dev->pdev, 3));
	if (ret)
		goto error;

	/* BAR1 VM */
	ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd);
	if (ret)
		goto error;

	ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd);
	if (ret)
		goto error;
	nouveau_vm_ref(NULL, &vm, NULL);

	ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1,
			       priv->bar1_pgd, pci_resource_len(dev->pdev, 1));
	if (ret)
		goto error;

	/* channel vm */
	ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd);
	if (ret)
		goto error;

	nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd);
	nouveau_vm_ref(NULL, &vm, NULL);

	nvc0_instmem_resume(dev);
	return 0;
error:
	nvc0_instmem_takedown(dev);
	return ret;
}

void
nvc0_instmem_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
	struct nouveau_vm *vm = NULL;

	nvc0_instmem_suspend(dev);

	nv_wr32(dev, 0x1704, 0x00000000);
	nv_wr32(dev, 0x1714, 0x00000000);

	nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd);
	nouveau_gpuobj_ref(NULL, &priv->chan_pgd);

	nvc0_channel_del(&priv->bar1);
	nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
	nouveau_gpuobj_ref(NULL, &priv->bar1_pgd);

	nvc0_channel_del(&priv->bar3);
	nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL);
	nouveau_vm_ref(NULL, &vm, priv->bar3_pgd);
	nouveau_gpuobj_ref(NULL, &priv->bar3_pgd);
	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
	nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);

	dev_priv->engine.instmem.priv = NULL;
	kfree(priv);
}

drivers/gpu/drm/nouveau/nvc0_vm.c (new file, 123 lines)
@ -0,0 +1,123 @@
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"

#include "nouveau_drv.h"
#include "nouveau_vm.h"

void
nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
		struct nouveau_gpuobj *pgt[2])
{
	u32 pde[2] = { 0, 0 };

	if (pgt[0])
		pde[1] = 0x00000001 | (pgt[0]->vinst >> 8);
	if (pgt[1])
		pde[0] = 0x00000001 | (pgt[1]->vinst >> 8);

	nv_wo32(pgd, (index * 8) + 0, pde[0]);
	nv_wo32(pgd, (index * 8) + 4, pde[1]);
}
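
/*
 * Each PDE is 8 bytes and holds two page-table pointers, which is how
 * the dual page table layout appears on nvc0: the first word points at
 * the large-page table (pgt[1]) and the second at the small-page table
 * (pgt[0]).
 */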

static inline u64
nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
{
	phys >>= 8;

	phys |= 0x00000001; /* present */
//	if (vma->access & NV_MEM_ACCESS_SYS)
//		phys |= 0x00000002;

	phys |= ((u64)target << 32);
	phys |= ((u64)memtype << 36);

	return phys;
}
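
/*
 * PTE format as used here: the physical address is stored in 256-byte
 * units (hence the >> 8), bit 0 marks the entry present, bits 32+
 * select the target (0 = VRAM, 5 = system memory as used by
 * nvc0_vm_map_sg() below) and bits 36+ carry the memtype attributes.
 */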

void
nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
	    struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
{
	u32 next = 1 << (vma->node->type - 8);

	phys = nvc0_vm_addr(vma, phys, mem->memtype, 0);
	pte <<= 3;
	while (cnt--) {
		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
		phys += next;
		pte += 8;
	}
}

void
nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
	       u32 pte, dma_addr_t *list, u32 cnt)
{
	pte <<= 3;
	while (cnt--) {
		u64 phys = nvc0_vm_addr(vma, *list++, 0, 5);
		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
		pte += 8;
	}
}

void
nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
	pte <<= 3;
	while (cnt--) {
		nv_wo32(pgt, pte + 0, 0x00000000);
		nv_wo32(pgt, pte + 4, 0x00000000);
		pte += 8;
	}
}

void
nvc0_vm_flush(struct nouveau_vm *vm)
{
	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct drm_device *dev = vm->dev;
	struct nouveau_vm_pgd *vpgd;
	u32 r100c80, engine;

	pinstmem->flush(vm->dev);

	if (vm == dev_priv->chan_vm)
		engine = 1;
	else
		engine = 5;

	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		r100c80 = nv_rd32(dev, 0x100c80);
		nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
		nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
		if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80))
			NV_ERROR(dev, "vm flush timeout eng %d\n", engine);
	}
}

drivers/gpu/drm/nouveau/nvc0_vram.c (new file, 99 lines)
@ -0,0 +1,99 @@
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"

bool
nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
{
	switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
	case 0x0000:
	case 0xfe00:
	case 0xdb00:
	case 0x1100:
		return true;
	default:
		break;
	}

	return false;
}

int
nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
	      u32 type, struct nouveau_vram **pvram)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
	struct nouveau_mm *mm = man->priv;
	struct nouveau_mm_node *r;
	struct nouveau_vram *vram;
	int ret;

	size >>= 12;
	align >>= 12;
	ncmin >>= 12;

	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
	if (!vram)
		return -ENOMEM;

	INIT_LIST_HEAD(&vram->regions);
	vram->dev = dev_priv->dev;
	vram->memtype = type;
	vram->size = size;

	mutex_lock(&mm->mutex);
	do {
		ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
		if (ret) {
			mutex_unlock(&mm->mutex);
			nv50_vram_del(dev, &vram);
			return ret;
		}

		list_add_tail(&r->rl_entry, &vram->regions);
		size -= r->length;
	} while (size);
	mutex_unlock(&mm->mutex);

	r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
	vram->offset = (u64)r->offset << 12;
	*pvram = vram;
	return 0;
}
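
/*
 * Everything above is page-granular (size, align and ncmin are shifted
 * down by 12 bits), and a request may be satisfied by several
 * nouveau_mm regions; the returned offset is simply that of the first
 * region in the list.
 */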

int
nvc0_vram_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
	dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
	dev_priv->vram_rblock_size = 4096;
	return 0;
}
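
/*
 * 0x10f20c appears to hold the amount of VRAM per memory partition in
 * MiB and 0x121c74 the partition count, hence the shift and multiply
 * above.
 */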