Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "Three nouveau fixes, all user visible issues, and one radeon
  regression fix"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon: enforce use of radeon_get_ib_value when reading user cmd
  drm/nouveau: add lockdep annotations
  drm/nv50/fb: Fix nullptr-deref on IGPs
  drm/nouveau: use different register to wait for secret scrubber
commit 211b0cdc7d
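The radeon change below replaces direct ib[] reads with radeon_get_ib_value() wherever user-supplied command words are validated; reading the GPU-visible indirect buffer directly risks checking values that differ from what userspace actually submitted, since the user's words may not have been copied into the ib yet. The following is a minimal illustrative sketch of that idea in plain C, not the radeon implementation: the cmd_parser struct, fetch_word() and check_copy_dst() are invented names for the example.

#include <stdint.h>
#include <stddef.h>

/* Illustrative only: a parser that keeps both the user-supplied command
 * words (CPU-side copy) and the GPU-visible indirect buffer (ib). */
struct cmd_parser {
	const uint32_t *user_words;	/* words received from userspace */
	uint32_t *ib;			/* GPU-visible buffer, filled/patched later */
	size_t ndw;
};

/* Read the word being validated from the user-supplied copy, never from
 * the ib, which may not contain the user's data yet. */
static inline uint32_t fetch_word(const struct cmd_parser *p, size_t idx)
{
	return p->user_words[idx];
}

/* Example check: reconstruct a 40-bit destination address from two packet
 * words and bounds-check it before the packet is handed to the GPU. */
static int check_copy_dst(const struct cmd_parser *p, size_t idx, uint64_t bo_size)
{
	uint64_t dst = fetch_word(p, idx + 1);
	dst |= ((uint64_t)(fetch_word(p, idx + 2) & 0xff)) << 32;
	return (dst <= bo_size) ? 0 : -1;
}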
@@ -73,8 +73,11 @@ _nouveau_falcon_init(struct nouveau_object *object)
 	nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
 
 	/* wait for 'uc halted' to be signalled before continuing */
-	if (falcon->secret) {
-		nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+	if (falcon->secret && falcon->version < 4) {
+		if (!falcon->version)
+			nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+		else
+			nv_wait(falcon, 0x180, 0x80000000, 0);
 		nv_wo32(falcon, 0x004, 0x00000010);
 	}
 
@@ -99,7 +99,7 @@ nouveau_subdev_create_(struct nouveau_object *parent,
 	if (ret)
 		return ret;
 
-	mutex_init(&subdev->mutex);
+	__mutex_init(&subdev->mutex, subname, &oclass->lock_class_key);
 	subdev->name = subname;
 
 	if (parent) {
@@ -50,10 +50,13 @@ int nouveau_object_fini(struct nouveau_object *, bool suspend);
 
 extern struct nouveau_ofuncs nouveau_object_ofuncs;
 
+/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
+ * ".data". */
 struct nouveau_oclass {
 	u32 handle;
-	struct nouveau_ofuncs *ofuncs;
-	struct nouveau_omthds *omthds;
+	struct nouveau_ofuncs * const ofuncs;
+	struct nouveau_omthds * const omthds;
+	struct lock_class_key lock_class_key;
 };
 
 #define nv_oclass(o) nv_object(o)->oclass
@@ -86,8 +86,8 @@ nouveau_fb_preinit(struct nouveau_fb *pfb)
 			return ret;
 	}
 
-	if (!nouveau_mm_initialised(&pfb->tags) && tags) {
-		ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1);
+	if (!nouveau_mm_initialised(&pfb->tags)) {
+		ret = nouveau_mm_init(&pfb->tags, 0, tags ? ++tags : 0, 1);
 		if (ret)
 			return ret;
 	}
@@ -99,7 +99,7 @@ nv50_fb_vram_init(struct nouveau_fb *pfb)
 	struct nouveau_bios *bios = nouveau_bios(device);
 	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
 	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-	u32 size;
+	u32 size, tags = 0;
 	int ret;
 
 	pfb->ram.size = nv_rd32(pfb, 0x10020c);
@@ -140,10 +140,11 @@ nv50_fb_vram_init(struct nouveau_fb *pfb)
 			return ret;
 
 		pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
+		tags = nv_rd32(pfb, 0x100320);
 		break;
 	}
 
-	return nv_rd32(pfb, 0x100320);
+	return tags;
 }
 
 static int
@@ -245,6 +245,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
 	return 0;
 }
 
+static struct lock_class_key drm_client_lock_class_key;
+
 static int
 nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 {
@@ -256,6 +258,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 	ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
 	if (ret)
 		return ret;
+	lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key);
 
 	dev->dev_private = drm;
 	drm->dev = dev;
@@ -2909,14 +2909,14 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 				return -EINVAL;
 			}
 			if (tiled) {
-				dst_offset = ib[idx+1];
+				dst_offset = radeon_get_ib_value(p, idx+1);
 				dst_offset <<= 8;
 
 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 				p->idx += count + 7;
 			} else {
-				dst_offset = ib[idx+1];
-				dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
 
 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2954,12 +2954,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 				DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
 				return -EINVAL;
 			}
-			dst_offset = ib[idx+1];
+			dst_offset = radeon_get_ib_value(p, idx+1);
 			dst_offset <<= 8;
-			dst2_offset = ib[idx+2];
+			dst2_offset = radeon_get_ib_value(p, idx+2);
 			dst2_offset <<= 8;
-			src_offset = ib[idx+8];
-			src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+			src_offset = radeon_get_ib_value(p, idx+8);
+			src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
 			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 				dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
 					 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3014,12 +3014,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 				DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
 				return -EINVAL;
 			}
-			dst_offset = ib[idx+1];
+			dst_offset = radeon_get_ib_value(p, idx+1);
 			dst_offset <<= 8;
-			dst2_offset = ib[idx+2];
+			dst2_offset = radeon_get_ib_value(p, idx+2);
 			dst2_offset <<= 8;
-			src_offset = ib[idx+8];
-			src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+			src_offset = radeon_get_ib_value(p, idx+8);
+			src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
 			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 				dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
 					 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3046,22 +3046,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 			/* detile bit */
 			if (idx_value & (1 << 31)) {
 				/* tiled src, linear dst */
-				src_offset = ib[idx+1];
+				src_offset = radeon_get_ib_value(p, idx+1);
 				src_offset <<= 8;
 				ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 
-				dst_offset = ib[idx+7];
-				dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+7);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
 				ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 				ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
 			} else {
 				/* linear src, tiled dst */
-				src_offset = ib[idx+7];
-				src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+				src_offset = radeon_get_ib_value(p, idx+7);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
 				ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
 				ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 
-				dst_offset = ib[idx+1];
+				dst_offset = radeon_get_ib_value(p, idx+1);
 				dst_offset <<= 8;
 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 			}
@@ -3098,12 +3098,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 				DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
 				return -EINVAL;
 			}
-			dst_offset = ib[idx+1];
+			dst_offset = radeon_get_ib_value(p, idx+1);
 			dst_offset <<= 8;
-			dst2_offset = ib[idx+2];
+			dst2_offset = radeon_get_ib_value(p, idx+2);
 			dst2_offset <<= 8;
-			src_offset = ib[idx+8];
-			src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+			src_offset = radeon_get_ib_value(p, idx+8);
+			src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
 			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 				dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
 					 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3135,22 +3135,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 			/* detile bit */
 			if (idx_value & (1 << 31)) {
 				/* tiled src, linear dst */
-				src_offset = ib[idx+1];
+				src_offset = radeon_get_ib_value(p, idx+1);
 				src_offset <<= 8;
 				ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 
-				dst_offset = ib[idx+7];
-				dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+7);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
 				ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 				ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
 			} else {
 				/* linear src, tiled dst */
-				src_offset = ib[idx+7];
-				src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+				src_offset = radeon_get_ib_value(p, idx+7);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
 				ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
 				ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 
-				dst_offset = ib[idx+1];
+				dst_offset = radeon_get_ib_value(p, idx+1);
 				dst_offset <<= 8;
 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 			}
@@ -3176,10 +3176,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 			switch (misc) {
 			case 0:
 				/* L2L, byte */
-				src_offset = ib[idx+2];
-				src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-				dst_offset = ib[idx+1];
-				dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+				src_offset = radeon_get_ib_value(p, idx+2);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
 				if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
 					dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
 						 src_offset + count, radeon_bo_size(src_reloc->robj));
@@ -3216,12 +3216,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 					DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
 					return -EINVAL;
 				}
-				dst_offset = ib[idx+1];
-				dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-				dst2_offset = ib[idx+2];
-				dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
-				src_offset = ib[idx+3];
-				src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+				dst2_offset = radeon_get_ib_value(p, idx+2);
+				dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+				src_offset = radeon_get_ib_value(p, idx+3);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
 				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 					dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
 						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3251,10 +3251,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 				}
 			} else {
 				/* L2L, dw */
-				src_offset = ib[idx+2];
-				src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-				dst_offset = ib[idx+1];
-				dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+				src_offset = radeon_get_ib_value(p, idx+2);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
 				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 					dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
 						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3279,8 +3279,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 			DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
 			return -EINVAL;
 		}
-		dst_offset = ib[idx+1];
-		dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+		dst_offset = radeon_get_ib_value(p, idx+1);
+		dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
 		if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
 			dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
 				 dst_offset, radeon_bo_size(dst_reloc->robj));
@@ -2623,14 +2623,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 				return -EINVAL;
 			}
 			if (tiled) {
-				dst_offset = ib[idx+1];
+				dst_offset = radeon_get_ib_value(p, idx+1);
 				dst_offset <<= 8;
 
 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 				p->idx += count + 5;
 			} else {
-				dst_offset = ib[idx+1];
-				dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
 
 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2658,32 +2658,32 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 				/* detile bit */
 				if (idx_value & (1 << 31)) {
 					/* tiled src, linear dst */
-					src_offset = ib[idx+1];
+					src_offset = radeon_get_ib_value(p, idx+1);
 					src_offset <<= 8;
 					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 
-					dst_offset = ib[idx+5];
-					dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+					dst_offset = radeon_get_ib_value(p, idx+5);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
 					ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 					ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
 				} else {
 					/* linear src, tiled dst */
-					src_offset = ib[idx+5];
-					src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+					src_offset = radeon_get_ib_value(p, idx+5);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
 					ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
 					ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 
-					dst_offset = ib[idx+1];
+					dst_offset = radeon_get_ib_value(p, idx+1);
 					dst_offset <<= 8;
 					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 				}
 				p->idx += 7;
 			} else {
 				if (p->family >= CHIP_RV770) {
-					src_offset = ib[idx+2];
-					src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-					dst_offset = ib[idx+1];
-					dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+					src_offset = radeon_get_ib_value(p, idx+2);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
 
 					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
@@ -2691,10 +2691,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 					p->idx += 5;
 				} else {
-					src_offset = ib[idx+2];
-					src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
-					dst_offset = ib[idx+1];
-					dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16;
+					src_offset = radeon_get_ib_value(p, idx+2);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
 
 					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
@@ -2724,8 +2724,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 			DRM_ERROR("bad DMA_PACKET_WRITE\n");
 			return -EINVAL;
 		}
-		dst_offset = ib[idx+1];
-		dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+		dst_offset = radeon_get_ib_value(p, idx+1);
+		dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
 		if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
 			dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
 				 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));