drm/amdgpu: use entity instead of ring for CS
Further demangle ring from entity handling.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 0d346a14c6
parent 8290268f31
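The mechanics behind the change: struct amdgpu_ctx_ring embeds its scheduler entity, so the new to_amdgpu_ctx_ring() macro can recover the per-context ring state from the entity pointer alone via container_of(), and the hardware ring, where still needed, is derived from the entity's run queue with to_amdgpu_ring(entity->rq->sched). Below is a standalone user-space sketch of that container_of() round trip; the struct layouts are simplified stand-ins for illustration, not the real amdgpu definitions.

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Same idea as the kernel's container_of(), minus the type checking. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct drm_sched_entity { int stub; };	/* stand-in for the real struct */

	struct amdgpu_ctx_ring {		/* simplified stand-in */
		uint64_t sequence;
		struct drm_sched_entity entity;	/* embedded member */
	};

	/* The to_amdgpu_ctx_ring() macro added by the patch. */
	#define to_amdgpu_ctx_ring(e) \
		container_of((e), struct amdgpu_ctx_ring, entity)

	int main(void)
	{
		struct amdgpu_ctx_ring cring = { .sequence = 42 };
		struct drm_sched_entity *entity = &cring.entity;

		/* Hand out only the entity; recover the wrapper on demand. */
		struct amdgpu_ctx_ring *back = to_amdgpu_ctx_ring(entity);

		printf("sequence via entity: %llu\n",
		       (unsigned long long)back->sequence);
		return 0;
	}

This is why the fence bookkeeping functions (amdgpu_ctx_add_fence(), amdgpu_ctx_get_fence(), amdgpu_ctx_wait_prev_fence()) no longer take a ring or ring index: the entity pointer carries enough information to find the same amdgpu_ctx_ring they used to reach through ring->idx.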
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -523,7 +523,7 @@ struct amdgpu_cs_parser {
 	/* scheduler job object */
 	struct amdgpu_job	*job;
-	struct amdgpu_ring	*ring;
+	struct drm_sched_entity	*entity;
 
 	/* buffer objects */
 	struct ww_acquire_ctx	ticket;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -893,13 +893,13 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 				 struct amdgpu_cs_parser *p)
 {
+	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
-	struct amdgpu_ring *ring = p->ring;
 	int r;
 
 	/* Only for UVD/VCE VM emulation */
-	if (p->ring->funcs->parse_cs || p->ring->funcs->patch_cs_in_place) {
+	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
 		unsigned i, j;
 
 		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
@@ -940,7 +940,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
 			kptr += va_start - offset;
 
-			if (p->ring->funcs->parse_cs) {
+			if (ring->funcs->parse_cs) {
 				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
 				amdgpu_bo_kunmap(aobj);
@@ -979,14 +979,15 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 {
 	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
-	int i, j;
 	int r, ce_preempt = 0, de_preempt = 0;
+	struct amdgpu_ring *ring;
+	int i, j;
 
 	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
 		struct amdgpu_cs_chunk *chunk;
 		struct amdgpu_ib *ib;
 		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
-		struct amdgpu_ring *ring;
+		struct drm_sched_entity *entity;
 
 		chunk = &parser->chunks[i];
 		ib = &parser->job->ibs[j];
@@ -1008,9 +1009,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 			return -EINVAL;
 		}
 
-		r = amdgpu_ctx_get_ring(parser->ctx, chunk_ib->ip_type,
+		r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
 					chunk_ib->ip_instance, chunk_ib->ring,
-					&ring);
+					&entity);
 		if (r)
 			return r;
 
@@ -1018,14 +1019,14 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 			parser->job->preamble_status |=
 				AMDGPU_PREAMBLE_IB_PRESENT;
 
-		if (parser->ring && parser->ring != ring)
+		if (parser->entity && parser->entity != entity)
 			return -EINVAL;
 
-		parser->ring = ring;
+		parser->entity = entity;
 
-		r = amdgpu_ib_get(adev, vm,
-				  ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
-				  ib);
+		ring = to_amdgpu_ring(entity->rq->sched);
+		r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
+				  chunk_ib->ib_bytes : 0, ib);
 		if (r) {
 			DRM_ERROR("Failed to get ib !\n");
 			return r;
@@ -1039,12 +1040,13 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 	}
 
 	/* UVD & VCE fw doesn't support user fences */
+	ring = to_amdgpu_ring(parser->entity->rq->sched);
 	if (parser->job->uf_addr && (
-	    parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
-	    parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
+	    ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+	    ring->funcs->type == AMDGPU_RING_TYPE_VCE))
 		return -EINVAL;
 
-	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx);
+	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
 }
 
 static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1060,23 +1062,23 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
 		sizeof(struct drm_amdgpu_cs_chunk_dep);
 
 	for (i = 0; i < num_deps; ++i) {
-		struct amdgpu_ring *ring;
 		struct amdgpu_ctx *ctx;
+		struct drm_sched_entity *entity;
 		struct dma_fence *fence;
 
 		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
 		if (ctx == NULL)
 			return -EINVAL;
 
-		r = amdgpu_ctx_get_ring(ctx, deps[i].ip_type,
+		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
 					deps[i].ip_instance,
-					deps[i].ring, &ring);
+					deps[i].ring, &entity);
 		if (r) {
 			amdgpu_ctx_put(ctx);
 			return r;
 		}
 
-		fence = amdgpu_ctx_get_fence(ctx, ring,
+		fence = amdgpu_ctx_get_fence(ctx, entity,
 					     deps[i].handle);
 		if (IS_ERR(fence)) {
 			r = PTR_ERR(fence);
@@ -1195,9 +1197,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 			    union drm_amdgpu_cs *cs)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-	struct amdgpu_ring *ring = p->ring;
-	struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+	struct drm_sched_entity *entity = p->entity;
 	enum drm_sched_priority priority;
+	struct amdgpu_ring *ring;
 	struct amdgpu_bo_list_entry *e;
 	struct amdgpu_job *job;
 	uint64_t seq;
@@ -1227,7 +1229,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	job->owner = p->filp;
 	p->fence = dma_fence_get(&job->base.s_fence->finished);
 
-	r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
+	r = amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
 	if (r) {
 		dma_fence_put(p->fence);
 		dma_fence_put(&job->base.s_fence->finished);
@@ -1332,7 +1334,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 {
 	union drm_amdgpu_wait_cs *wait = data;
 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
-	struct amdgpu_ring *ring = NULL;
+	struct drm_sched_entity *entity;
 	struct amdgpu_ctx *ctx;
 	struct dma_fence *fence;
 	long r;
@@ -1341,14 +1343,14 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 	if (ctx == NULL)
 		return -EINVAL;
 
-	r = amdgpu_ctx_get_ring(ctx, wait->in.ip_type, wait->in.ip_instance,
-				wait->in.ring, &ring);
+	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
+				  wait->in.ring, &entity);
 	if (r) {
 		amdgpu_ctx_put(ctx);
 		return r;
 	}
 
-	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
+	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
 	if (IS_ERR(fence))
 		r = PTR_ERR(fence);
 	else if (fence) {
@@ -1380,7 +1382,7 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
 					     struct drm_file *filp,
 					     struct drm_amdgpu_fence *user)
 {
-	struct amdgpu_ring *ring;
+	struct drm_sched_entity *entity;
 	struct amdgpu_ctx *ctx;
 	struct dma_fence *fence;
 	int r;
@@ -1389,14 +1391,14 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
 	if (ctx == NULL)
 		return ERR_PTR(-EINVAL);
 
-	r = amdgpu_ctx_get_ring(ctx, user->ip_type, user->ip_instance,
-				user->ring, &ring);
+	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
+				  user->ring, &entity);
 	if (r) {
 		amdgpu_ctx_put(ctx);
 		return ERR_PTR(r);
 	}
 
-	fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
+	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
 	amdgpu_ctx_put(ctx);
 
 	return fence;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -27,6 +27,9 @@
 #include "amdgpu.h"
 #include "amdgpu_sched.h"
 
+#define to_amdgpu_ctx_ring(e) \
+	container_of((e), struct amdgpu_ctx_ring, entity)
+
 static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 				      enum drm_sched_priority priority)
 {
@@ -151,12 +154,12 @@ static void amdgpu_ctx_fini(struct kref *ref)
 	kfree(ctx);
 }
 
-int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx,
-			u32 hw_ip, u32 instance, u32 ring,
-			struct amdgpu_ring **out_ring)
+int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
+			  u32 ring, struct drm_sched_entity **entity)
 {
 	struct amdgpu_device *adev = ctx->adev;
 	unsigned num_rings = 0;
+	struct amdgpu_ring *out_ring;
 
 	/* Right now all IPs have only one instance - multiple rings. */
 	if (instance != 0) {
@@ -166,39 +169,39 @@ int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx,
 
 	switch (hw_ip) {
 	case AMDGPU_HW_IP_GFX:
-		*out_ring = &adev->gfx.gfx_ring[ring];
+		out_ring = &adev->gfx.gfx_ring[ring];
 		num_rings = adev->gfx.num_gfx_rings;
 		break;
 	case AMDGPU_HW_IP_COMPUTE:
-		*out_ring = &adev->gfx.compute_ring[ring];
+		out_ring = &adev->gfx.compute_ring[ring];
 		num_rings = adev->gfx.num_compute_rings;
 		break;
 	case AMDGPU_HW_IP_DMA:
-		*out_ring = &adev->sdma.instance[ring].ring;
+		out_ring = &adev->sdma.instance[ring].ring;
 		num_rings = adev->sdma.num_instances;
 		break;
 	case AMDGPU_HW_IP_UVD:
-		*out_ring = &adev->uvd.inst[0].ring;
+		out_ring = &adev->uvd.inst[0].ring;
 		num_rings = adev->uvd.num_uvd_inst;
 		break;
 	case AMDGPU_HW_IP_VCE:
-		*out_ring = &adev->vce.ring[ring];
+		out_ring = &adev->vce.ring[ring];
 		num_rings = adev->vce.num_rings;
 		break;
 	case AMDGPU_HW_IP_UVD_ENC:
-		*out_ring = &adev->uvd.inst[0].ring_enc[ring];
+		out_ring = &adev->uvd.inst[0].ring_enc[ring];
 		num_rings = adev->uvd.num_enc_rings;
 		break;
 	case AMDGPU_HW_IP_VCN_DEC:
-		*out_ring = &adev->vcn.ring_dec;
+		out_ring = &adev->vcn.ring_dec;
 		num_rings = 1;
 		break;
 	case AMDGPU_HW_IP_VCN_ENC:
-		*out_ring = &adev->vcn.ring_enc[ring];
+		out_ring = &adev->vcn.ring_enc[ring];
 		num_rings = adev->vcn.num_enc_rings;
 		break;
 	case AMDGPU_HW_IP_VCN_JPEG:
-		*out_ring = &adev->vcn.ring_jpeg;
+		out_ring = &adev->vcn.ring_jpeg;
 		num_rings = 1;
 		break;
 	default:
@@ -209,6 +212,7 @@ int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx,
 	if (ring > num_rings)
 		return -EINVAL;
 
+	*entity = &ctx->rings[out_ring->idx].entity;
 	return 0;
 }
 
@@ -414,13 +418,14 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 	return 0;
 }
 
-int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-			 struct dma_fence *fence, uint64_t* handler)
+int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
+			 struct drm_sched_entity *entity,
+			 struct dma_fence *fence, uint64_t* handle)
 {
-	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
+	struct amdgpu_ctx_ring *cring = to_amdgpu_ctx_ring(entity);
 	uint64_t seq = cring->sequence;
-	unsigned idx = 0;
 	struct dma_fence *other = NULL;
+	unsigned idx = 0;
 
 	idx = seq & (amdgpu_sched_jobs - 1);
 	other = cring->fences[idx];
@@ -435,22 +440,23 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 	spin_unlock(&ctx->ring_lock);
 
 	dma_fence_put(other);
-	if (handler)
-		*handler = seq;
+	if (handle)
+		*handle = seq;
 
 	return 0;
 }
 
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
-				       struct amdgpu_ring *ring, uint64_t seq)
+				       struct drm_sched_entity *entity,
+				       uint64_t seq)
 {
-	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
+	struct amdgpu_ctx_ring *cring = to_amdgpu_ctx_ring(entity);
 	struct dma_fence *fence;
 
 	spin_lock(&ctx->ring_lock);
 
 	if (seq == ~0ull)
-		seq = ctx->rings[ring->idx].sequence - 1;
+		seq = cring->sequence - 1;
 
 	if (seq >= cring->sequence) {
 		spin_unlock(&ctx->ring_lock);
@@ -494,9 +500,10 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
 	}
 }
 
-int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
+			       struct drm_sched_entity *entity)
 {
-	struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
+	struct amdgpu_ctx_ring *cring = to_amdgpu_ctx_ring(entity);
 	unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
 	struct dma_fence *other = cring->fences[idx];
 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -61,20 +61,22 @@ struct amdgpu_ctx_mgr {
 struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
 
-int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx,
-			u32 hw_ip, u32 instance, u32 ring,
-			struct amdgpu_ring **out_ring);
-int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
+			  u32 ring, struct drm_sched_entity **entity);
+int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
+			 struct drm_sched_entity *entity,
 			 struct dma_fence *fence, uint64_t *seq);
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
-				       struct amdgpu_ring *ring, uint64_t seq);
+				       struct drm_sched_entity *entity,
+				       uint64_t seq);
 void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
 				  enum drm_sched_priority priority);
 
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *filp);
 
-int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
+			       struct drm_sched_entity *entity);
 
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
 void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -150,10 +150,10 @@ TRACE_EVENT(amdgpu_cs,
 
 	    TP_fast_assign(
 			   __entry->bo_list = p->bo_list;
-			   __entry->ring = p->ring->idx;
+			   __entry->ring = to_amdgpu_ring(p->entity->rq->sched)->idx;
 			   __entry->dw = p->job->ibs[i].length_dw;
 			   __entry->fences = amdgpu_fence_count_emitted(
-					p->ring);
+					to_amdgpu_ring(p->entity->rq->sched));
 	    ),
 	    TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
 		      __entry->bo_list, __entry->ring, __entry->dw,
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1264,11 +1264,12 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
 					   uint32_t ib_idx)
 {
+	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
 	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
 	unsigned i;
 
 	/* No patching necessary for the first instance */
-	if (!p->ring->me)
+	if (!ring->me)
 		return 0;
 
 	for (i = 0; i < ib->length_dw; i += 2) {