diff --git a/drivers/gpu/drm/drm_gpuva_mgr.c b/drivers/gpu/drm/drm_gpuva_mgr.c
index f86bfad74ff8..ad99c9cfedac 100644
--- a/drivers/gpu/drm/drm_gpuva_mgr.c
+++ b/drivers/gpu/drm/drm_gpuva_mgr.c
@@ -1076,7 +1076,7 @@ __drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
 		   u64 req_addr, u64 req_range,
 		   struct drm_gem_object *req_obj, u64 req_offset)
 {
-	struct drm_gpuva *va, *next, *prev = NULL;
+	struct drm_gpuva *va, *next;
 	u64 req_end = req_addr + req_range;
 	int ret;
 
@@ -1106,7 +1106,7 @@ __drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
 				ret = op_unmap_cb(ops, priv, va, merge);
 				if (ret)
 					return ret;
-				goto next;
+				continue;
 			}
 
 			if (end > req_end) {
@@ -1151,7 +1151,7 @@ __drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
 				ret = op_remap_cb(ops, priv, &p, NULL, &u);
 				if (ret)
 					return ret;
-				goto next;
+				continue;
 			}
 
 			if (end > req_end) {
@@ -1184,7 +1184,7 @@ __drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
 				ret = op_unmap_cb(ops, priv, va, merge);
 				if (ret)
 					return ret;
-				goto next;
+				continue;
 			}
 
 			if (end > req_end) {
@@ -1205,8 +1205,6 @@ __drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
 				break;
 			}
 		}
-next:
-		prev = va;
 	}
 
 	return op_map_cb(ops, priv,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index b90cac6d5772..b01c029f3a90 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -69,16 +69,19 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
 }
 
 void
-nv50_dma_push(struct nouveau_channel *chan, u64 offset, int length)
+nv50_dma_push(struct nouveau_channel *chan, u64 offset, u32 length,
+	      bool no_prefetch)
 {
 	struct nvif_user *user = &chan->drm->client.device.user;
 	struct nouveau_bo *pb = chan->push.buffer;
 	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
 
 	BUG_ON(chan->dma.ib_free < 1);
+	WARN_ON(length > NV50_DMA_PUSH_MAX_LENGTH);
 
 	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
-	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
+	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8 |
+			(no_prefetch ? (1 << 31) : 0));
 
 	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 035a709c7be1..1744d95b233e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -31,7 +31,8 @@
 #include "nouveau_chan.h"
 
 int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
-void nv50_dma_push(struct nouveau_channel *, u64 addr, int length);
+void nv50_dma_push(struct nouveau_channel *, u64 addr, u32 length,
+		   bool no_prefetch);
 
 /*
  * There's a hw race condition where you can't jump to your PUT offset,
@@ -45,6 +46,9 @@ void nv50_dma_push(struct nouveau_channel *, u64 addr, int length);
  */
#define NOUVEAU_DMA_SKIPS (128 / 4)
 
+/* Maximum push buffer size. */
+#define NV50_DMA_PUSH_MAX_LENGTH 0x7fffff
+
 /* Object handles - for stuff that's doesn't use handle == oclass. */
 enum {
 	NvDmaFB = 0x80000002,
@@ -89,7 +93,7 @@ FIRE_RING(struct nouveau_channel *chan)
 	if (chan->dma.ib_max) {
 		nv50_dma_push(chan, chan->push.addr + (chan->dma.put << 2),
-			      (chan->dma.cur - chan->dma.put) << 2);
+			      (chan->dma.cur - chan->dma.put) << 2, false);
 	} else {
 		WRITE_PUT(chan->dma.cur);
 	}
 }
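For reference, the second 32-bit word of the IB entry written by nv50_dma_push() above packs the upper offset bits, the fetch length, and the new no-prefetch bit, which is why lengths are now capped at NV50_DMA_PUSH_MAX_LENGTH (23 bits). A minimal stand-alone sketch of that encoding, assuming this bit layout; the helper name is illustrative and not part of the patch:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch of the second IB entry word as written by nv50_dma_push():
     * bits 0-7 carry the upper offset bits, bits 8-30 the fetch length
     * (hence the 0x7fffff limit), and bit 31 the no-prefetch flag.
     */
    static uint32_t ib_entry_hi(uint64_t offset, uint32_t length, bool no_prefetch)
    {
            return (uint32_t)(offset >> 32) | (length << 8) |
                   (no_prefetch ? (1u << 31) : 0);
    }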
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index 0f927adda4ed..a90c4cd8cbb2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -164,8 +164,10 @@ nouveau_exec_job_run(struct nouveau_job *job)
 	}
 
 	for (i = 0; i < exec_job->push.count; i++) {
-		nv50_dma_push(chan, exec_job->push.s[i].va,
-			      exec_job->push.s[i].va_len);
+		struct drm_nouveau_exec_push *p = &exec_job->push.s[i];
+		bool no_prefetch = p->flags & DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH;
+
+		nv50_dma_push(chan, p->va, p->va_len, no_prefetch);
 	}
 
 	ret = nouveau_fence_emit(fence, chan);
@@ -223,7 +225,18 @@ nouveau_exec_job_init(struct nouveau_exec_job **pjob,
 {
 	struct nouveau_exec_job *job;
 	struct nouveau_job_args args = {};
-	int ret;
+	int i, ret;
+
+	for (i = 0; i < __args->push.count; i++) {
+		struct drm_nouveau_exec_push *p = &__args->push.s[i];
+
+		if (unlikely(p->va_len > NV50_DMA_PUSH_MAX_LENGTH)) {
+			NV_PRINTK(err, nouveau_cli(__args->file_priv),
+				  "pushbuf size exceeds limit: 0x%x max 0x%x\n",
+				  p->va_len, NV50_DMA_PUSH_MAX_LENGTH);
+			return -EINVAL;
+		}
+	}
 
 	job = *pjob = kzalloc(sizeof(*job), GFP_KERNEL);
 	if (!job)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index f39360870c70..c0b10d8d3d03 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -856,9 +856,11 @@ revalidate:
 		for (i = 0; i < req->nr_push; i++) {
 			struct nouveau_vma *vma = (void *)(unsigned long)
 				bo[push[i].bo_index].user_priv;
+			u64 addr = vma->addr + push[i].offset;
+			u32 length = push[i].length & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
+			bool no_prefetch = push[i].length & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
 
-			nv50_dma_push(chan, vma->addr + push[i].offset,
-				      push[i].length);
+			nv50_dma_push(chan, addr, length, no_prefetch);
 		}
 	} else
 	if (drm->client.device.info.chipset >= 0x25) {
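To see how existing userspace would opt in through the legacy pushbuf ioctl, here is a hedged userspace-side sketch: the no-prefetch request rides in bit 23 of the length field, which is free because valid lengths never exceed NV50_DMA_PUSH_MAX_LENGTH (0x7fffff). The fill_push() helper below is hypothetical, not part of the patch:

    #include <stdbool.h>
    #include <drm/nouveau_drm.h>

    /* Hypothetical userspace helper: with the legacy pushbuf ioctl, the
     * no-prefetch request is encoded in bit 23 of the length field --
     * exactly the bit nouveau_gem.c masks out above.
     */
    static void fill_push(struct drm_nouveau_gem_pushbuf_push *p,
                          __u32 bo_index, __u64 offset, __u64 len,
                          bool no_prefetch)
    {
            p->bo_index = bo_index;
            p->pad = 0;
            p->offset = offset;
            p->length = len | (no_prefetch ? NOUVEAU_GEM_PUSHBUF_NO_PREFETCH : 0);
    }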
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index 3424a1bf6af3..88217185e0f3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -292,6 +292,28 @@ nouveau_job_submit(struct nouveau_job *job)
 	if (job->sync)
 		done_fence = dma_fence_get(job->done_fence);
 
+	/* If a sched job depends on a dma-fence from a job from the same GPU
+	 * scheduler instance, but a different scheduler entity, the GPU
+	 * scheduler only waits for the particular job to be scheduled,
+	 * rather than for the job to fully complete. This is due to the GPU
+	 * scheduler assuming that there is a scheduler instance per ring.
+	 * However, the current implementation, in order to avoid arbitrary
+	 * amounts of kthreads, has a single scheduler instance while scheduler
+	 * entities represent rings.
+	 *
+	 * As a workaround, set the DRM_SCHED_FENCE_DONT_PIPELINE flag for all
+	 * out-fences in order to force the scheduler to wait for full job
+	 * completion for dependent jobs from different entities and same
+	 * scheduler instance.
+	 *
+	 * There is some work in progress [1] to address the issues of firmware
+	 * schedulers; once it is in-tree the scheduler topology in Nouveau
+	 * should be re-worked accordingly.
+	 *
+	 * [1] https://lore.kernel.org/dri-devel/20230801205103.627779-1-matthew.brost@intel.com/
+	 */
+	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &job->done_fence->flags);
+
 	if (job->ops->armed_submit)
 		job->ops->armed_submit(job);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 3a1e8538f205..aae780e4a4aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -639,6 +639,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
 			struct drm_gpuva *va = r->unmap->va;
 			struct uvmm_map_args remap_args = {
 				.kind = uvma_from_va(va)->kind,
+				.region = uvma_from_va(va)->region,
 			};
 			u64 ustart = va->va.addr;
 			u64 urange = va->va.range;
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index 3d624ff2f651..c1dfbfcaa000 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -156,7 +156,7 @@ static void action_drm_release_context(void *ptr)
 }
 
 /**
- * drm_kunit_helper_context_alloc - Allocates an acquire context
+ * drm_kunit_helper_acquire_ctx_alloc - Allocates an acquire context
  * @test: The test context object
  *
  * Allocates and initializes a modeset acquire context.
diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
index 8d90870fb199..2d9cae8cd984 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
@@ -228,8 +228,8 @@ static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
 	dma1 = tt->dma_address[0];
 	dma2 = tt->dma_address[tt->num_pages - 1];
 
-	KUNIT_ASSERT_NOT_NULL(test, (void *)dma1);
-	KUNIT_ASSERT_NOT_NULL(test, (void *)dma2);
+	KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma1);
+	KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma2);
 
 	ttm_pool_free(pool, tt);
 	ttm_tt_fini(tt);
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index b1ad9d5ffce8..8d7402c13e56 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -138,6 +138,7 @@ struct drm_nouveau_gem_pushbuf_push {
 	__u32 pad;
 	__u64 offset;
 	__u64 length;
+#define NOUVEAU_GEM_PUSHBUF_NO_PREFETCH (1 << 23)
 };
 
 struct drm_nouveau_gem_pushbuf {
@@ -338,7 +339,12 @@ struct drm_nouveau_exec_push {
 	/**
 	 * @va_len: the length of the push buffer mapping
 	 */
-	__u64 va_len;
+	__u32 va_len;
+	/**
+	 * @flags: the flags for this push buffer mapping
+	 */
+	__u32 flags;
+#define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
 };
 
 /**
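With the new EXEC uapi, by contrast, the request moves out of the length field into the dedicated flags member. A hedged usage sketch under the struct layout shown above; the fill_exec_push() helper is hypothetical:

    #include <stdbool.h>
    #include <drm/nouveau_drm.h>

    /* Hypothetical userspace helper for the new EXEC uapi: va_len is now
     * a plain 32-bit length, and no-prefetch is requested through the
     * dedicated flags field rather than a bit borrowed from the length.
     */
    static void fill_exec_push(struct drm_nouveau_exec_push *p,
                               __u64 va, __u32 va_len, bool no_prefetch)
    {
            p->va = va;
            p->va_len = va_len;
            p->flags = no_prefetch ? DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH : 0;
    }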