mirror of https://github.com/torvalds/linux.git
Merge remote branch 'nouveau/for-airlied' of ../drm-nouveau-next into drm-linus
* 'nouveau/for-airlied' of ../drm-nouveau-next:
  drm/nv50: fix vram ptes on IGPs to point at stolen system memory
  drm/nv50: fix instmem binding on IGPs to point at stolen system memory
  drm/nv50: improve vram page table construction
  drm/nv50: more efficient clearing of gpu page table entries
  drm/nv50: make nv50_mem_vm_{bind,unbind} operate only on vram
  drm/nouveau: Fix up pre-nv17 analog load detection.
commit f7072e00f0
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -583,6 +583,7 @@ struct drm_nouveau_private {
 	uint64_t vm_end;
 	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
 	int vm_vram_pt_nr;
+	uint64_t vram_sys_base;
 
 	/* the mtrr covering the FB */
 	int fb_mtrr;
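The new vram_sys_base field is the common thread of the IGP fixes in this series: on IGPs the "VRAM" is actually stolen system memory, so every page table entry pointing at it has to be rebased and retargeted. A minimal standalone sketch of the encoding the hunks below apply; the helper name and free-standing form are illustrative, not driver code:

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper, not part of the driver. */
static uint64_t nv50_encode_pte(uint64_t phys, uint64_t vram_sys_base)
{
    phys |= 1;                  /* mark the entry present */
    if (vram_sys_base) {        /* IGP: rebase into stolen memory */
        phys += vram_sys_base;
        phys |= 0x30;           /* target bits: system memory */
    }
    return phys;
}

int main(void)
{
    /* dGPU (base 0) vs IGP (stolen base invented for illustration) */
    printf("dGPU PTE: 0x%llx\n",
           (unsigned long long)nv50_encode_pte(0x100000, 0));
    printf("IGP  PTE: 0x%llx\n",
           (unsigned long long)nv50_encode_pte(0x100000, 0x40000000));
    return 0;
}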
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -285,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 		    uint32_t flags, uint64_t phys)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj **pgt;
-	unsigned psz, pfl, pages;
-
-	if (virt >= dev_priv->vm_gart_base &&
-	    (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
-		psz = 12;
-		pgt = &dev_priv->gart_info.sg_ctxdma;
-		pfl = 0x21;
-		virt -= dev_priv->vm_gart_base;
-	} else
-	if (virt >= dev_priv->vm_vram_base &&
-	    (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
-		psz = 16;
-		pgt = dev_priv->vm_vram_pt;
-		pfl = 0x01;
-		virt -= dev_priv->vm_vram_base;
-	} else {
-		NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
-			 virt, virt + size - 1);
-		return -EINVAL;
-	}
+	struct nouveau_gpuobj *pgt;
+	unsigned block;
+	int i;
+
+	virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
+	size = (size >> 16) << 1;
 
-	pages = size >> psz;
+	phys |= ((uint64_t)flags << 32);
+	phys |= 1;
+	if (dev_priv->vram_sys_base) {
+		phys += dev_priv->vram_sys_base;
+		phys |= 0x30;
+	}
 
 	dev_priv->engine.instmem.prepare_access(dev, true);
-	if (flags & 0x80000000) {
-		while (pages--) {
-			struct nouveau_gpuobj *pt = pgt[virt >> 29];
-			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
-
-			nv_wo32(dev, pt, pte++, 0x00000000);
-			nv_wo32(dev, pt, pte++, 0x00000000);
+	while (size) {
+		unsigned offset_h = upper_32_bits(phys);
+		unsigned offset_l = lower_32_bits(phys);
+		unsigned pte, end;
+
+		for (i = 7; i >= 0; i--) {
+			block = 1 << (i + 1);
+			if (size >= block && !(virt & (block - 1)))
+				break;
+		}
+		offset_l |= (i << 7);
 
-			virt += (1 << psz);
-		}
-	} else {
-		while (pages--) {
-			struct nouveau_gpuobj *pt = pgt[virt >> 29];
-			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
-			unsigned offset_h = upper_32_bits(phys) & 0xff;
-			unsigned offset_l = lower_32_bits(phys);
+		phys += block << 15;
+		size -= block;
 
-			nv_wo32(dev, pt, pte++, offset_l | pfl);
-			nv_wo32(dev, pt, pte++, offset_h | flags);
+		while (block) {
+			pgt = dev_priv->vm_vram_pt[virt >> 14];
+			pte = virt & 0x3ffe;
 
-			phys += (1 << psz);
-			virt += (1 << psz);
-		}
+			end = pte + block;
+			if (end > 16384)
+				end = 16384;
+			block -= (end - pte);
+			virt += (end - pte);
+
+			while (pte < end) {
+				nv_wo32(dev, pgt, pte++, offset_l);
+				nv_wo32(dev, pgt, pte++, offset_h);
+			}
+		}
 	}
 	dev_priv->engine.instmem.finish_access(dev);
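The rewritten bind loop above is the "improve vram page table construction" change: instead of stamping out one PTE pair per 64KiB page, it encodes naturally aligned runs of up to 128 pages in a single PTE value (block counts PTE words, two per page, and the run's log2 page count lands in bits 7+ of the low word). A standalone sketch of just the run-selection arithmetic, with made-up inputs:

#include <stdio.h>

/* Given a position and remaining size (both in PTE words, two words
 * per 64KiB page), pick the largest power-of-two run that fits and is
 * naturally aligned.  In the driver size >= 2 and virt is even, so the
 * loop always terminates via the break at i == 0 or earlier. */
static unsigned pick_block(unsigned virt, unsigned size, int *encode)
{
    unsigned block = 2;
    int i;

    for (i = 7; i >= 0; i--) {
        block = 1 << (i + 1);
        if (size >= block && !(virt & (block - 1)))
            break;
    }
    *encode = i << 7;   /* size field OR'd into the low PTE word */
    return block;
}

int main(void)
{
    unsigned virt = 0, size = 0x242;    /* made-up inputs, in PTE words */
    int encode;

    while (size) {
        unsigned block = pick_block(virt, size, &encode);
        printf("virt=0x%x run=%u words encode=0x%x\n", virt, block, encode);
        virt += block;
        size -= block;
    }
    return 0;
}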
@@ -356,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 void
 nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 {
-	nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *pgt;
+	unsigned pages, pte, end;
+
+	virt -= dev_priv->vm_vram_base;
+	pages = (size >> 16) << 1;
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	while (pages) {
+		pgt = dev_priv->vm_vram_pt[virt >> 29];
+		pte = (virt & 0x1ffe0000ULL) >> 15;
+
+		end = pte + pages;
+		if (end > 16384)
+			end = 16384;
+		pages -= (end - pte);
+		virt += (end - pte) << 15;
+
+		while (pte < end)
+			nv_wo32(dev, pgt, pte++, 0);
+	}
+	dev_priv->engine.instmem.finish_access(dev);
+
+	nv_wr32(dev, 0x100c80, 0x00050001);
+	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+		return;
+	}
+
+	nv_wr32(dev, 0x100c80, 0x00000001);
+	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+	}
 }
 
 /*
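The new unbind above pairs with the "more efficient clearing" change: rather than routing through the bind path one page at a time, it zeroes whole spans of PTE words directly, clamping each span to the 16384-word page table that covers 512MiB of the VRAM aperture. A standalone demo of the span arithmetic, with invented addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t virt = 0x1ffc0000;     /* made-up offset into the VRAM aperture */
    uint32_t size = 0x00100000;     /* 1MiB */
    unsigned pages = (size >> 16) << 1;     /* PTE words to clear */

    while (pages) {
        unsigned pt  = (unsigned)(virt >> 29);                   /* which 512MiB table */
        unsigned pte = (unsigned)((virt & 0x1ffe0000ULL) >> 15); /* first word in it */
        unsigned end = pte + pages;

        if (end > 16384)            /* clamp the span to this table */
            end = 16384;
        pages -= (end - pte);
        virt  += (uint64_t)(end - pte) << 15;

        printf("clear pt[%u] words %u..%u\n", pt, pte, end - 1);
    }
    return 0;
}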
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
 					    struct drm_connector *connector)
 {
 	struct drm_device *dev = encoder->dev;
-	uint8_t saved_seq1, saved_pi, saved_rpc1;
+	uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
 	uint8_t saved_palette0[3], saved_palette_mask;
 	uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
 	int i;
@@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
 	/* only implemented for head A for now */
 	NVSetOwner(dev, 0);
 
+	saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);
+
 	saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
 	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
 
@@ -203,6 +206,7 @@ out:
 	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
 	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
 	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
 
 	if (blue == 0x18) {
 		NV_INFO(dev, "Load detected on head A\n");

--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev)
 	for (i = 0x1700; i <= 0x1710; i += 4)
 		priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
 
+	if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
+		dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
+	else
+		dev_priv->vram_sys_base = 0;
+
 	/* Reserve the last MiB of VRAM, we should probably try to avoid
 	 * setting up the below tables over the top of the VBIOS image at
 	 * some point.
@@ -172,16 +177,28 @@ nv50_instmem_init(struct drm_device *dev)
 	 * We map the entire fake channel into the start of the PRAMIN BAR
 	 */
 	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
-				     0, &priv->pramin_pt);
+				     0, &priv->pramin_pt);
 	if (ret)
 		return ret;
 
-	for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) {
-		if (v < (c_offset + c_size))
-			BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
-		else
-			BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
+	v = c_offset | 1;
+	if (dev_priv->vram_sys_base) {
+		v += dev_priv->vram_sys_base;
+		v |= 0x30;
+	}
+
+	i = 0;
+	while (v < dev_priv->vram_sys_base + c_offset + c_size) {
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v);
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+		v += 0x1000;
+		i += 8;
+	}
+
+	while (i < pt_size) {
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
 		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+		i += 8;
 	}
 
 	BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
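The reworked init loop above splits the old single pass in two: first map the fake channel's pages (rebased by vram_sys_base on IGPs), then invalidate the remainder of the PRAMIN page table. A toy rendition with BAR0_WI32 modelled as an array write; the table size, channel offset, and sizes are all made up:

#include <stdint.h>
#include <stdio.h>

#define PT_BYTES 256                    /* toy table size; the real one is pt_size */
static uint32_t pt[PT_BYTES / 4];

int main(void)
{
    uint64_t vram_sys_base = 0;         /* non-zero on IGP chipsets */
    uint64_t c_offset = 0x2000;         /* made-up fake-channel placement */
    uint64_t c_size   = 0x4000;
    uint64_t v = c_offset | 1;          /* first page, present bit set */
    unsigned i = 0;                     /* byte offset into the table */

    if (vram_sys_base) {
        v += vram_sys_base;             /* rebase into stolen memory */
        v |= 0x30;                      /* system-memory target bits */
    }

    /* phase 1: valid entries covering the fake channel */
    while (v < vram_sys_base + c_offset + c_size) {
        pt[i / 4 + 0] = (uint32_t)v;
        pt[i / 4 + 1] = 0x00000000;
        v += 0x1000;
        i += 8;
    }
    /* phase 2: invalidate everything after it */
    while (i < PT_BYTES) {
        pt[i / 4 + 0] = 0x00000000;
        pt[i / 4 + 1] = 0x00000000;
        i += 8;
    }
    printf("mapped %u pages\n", (unsigned)(c_size >> 12));
    return 0;
}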
@@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	uint32_t pte, pte_end, vram;
+	struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
+	uint32_t pte, pte_end;
+	uint64_t vram;
 
 	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
 		return -EINVAL;
@@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
 		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
 
-	pte     = (gpuobj->im_pramin->start >> 12) << 3;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+	pte     = (gpuobj->im_pramin->start >> 12) << 1;
+	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
 	vram    = gpuobj->im_backing_start;
 
 	NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
 		 gpuobj->im_pramin->start, pte, pte_end);
 	NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
 
+	vram |= 1;
+	if (dev_priv->vram_sys_base) {
+		vram += dev_priv->vram_sys_base;
+		vram |= 0x30;
+	}
+
 	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (pte < pte_end) {
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
-
-		pte += 8;
+		nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
+		nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
 		vram += NV50_INSTMEM_PAGE_SIZE;
 	}
 	dev_priv->engine.instmem.finish_access(dev);
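The bind hunk above widens vram to 64 bits because adding the stolen-memory base can push the address past 32 bits; each PTE is then written as a low/high word pair, which is also why pte now counts 32-bit words (<< 1) instead of bytes (<< 3). A standalone sketch; the stolen base and backing address are invented, and lower/upper_32_bits mimic the kernel helpers:

#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
    uint64_t vram_sys_base = 0xc0000000ULL;     /* invented stolen base */
    uint64_t vram = 0x7ff00000;                 /* invented im_backing_start */

    vram |= 1;                                  /* present */
    vram += vram_sys_base;                      /* may now exceed 32 bits */
    vram |= 0x30;                               /* system-memory target */

    printf("PTE words: lo=0x%08x hi=0x%08x\n",
           lower_32_bits(vram), upper_32_bits(vram));
    return 0;
}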
@@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	if (gpuobj->im_bound == 0)
 		return -EINVAL;
 
-	pte     = (gpuobj->im_pramin->start >> 12) << 3;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+	pte     = (gpuobj->im_pramin->start >> 12) << 1;
+	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
 
 	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (pte < pte_end) {
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
-		pte += 8;
+		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
+		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
 	}
 	dev_priv->engine.instmem.finish_access(dev);
 