drm/amdgpu: abstract amdgpu_job for scheduler
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
parent 6055f37afd
commit bb977d3711
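For orientation, a minimal sketch (not the literal patched function) of the submission flow this change introduces: the scheduler job is now a driver-side struct amdgpu_job that embeds amd_sched_job, instead of an amdgpu_cs_parser smuggled through a void *data pointer. The user-fence copy, sequence bookkeeping and most error handling are trimmed; amdgpu_cs_submit_sketch is a hypothetical name, the other identifiers come from the diff below.

/* Minimal, hypothetical sketch condensed from the amdgpu_cs_ioctl() hunk
 * below; user-fence copy and sequence bookkeeping are trimmed. */
#include "amdgpu.h"

static int amdgpu_cs_submit_sketch(struct amdgpu_cs_parser *parser,
				   struct amdgpu_ring *ring)
{
	struct amdgpu_job *job;
	int r;

	job = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	/* amd_sched_job is embedded as job->base instead of being carried
	 * through the old void *data pointer on amd_sched_job. */
	job->base.sched = ring->scheduler;
	job->base.s_entity = &parser->ctx->rings[ring->idx].entity;

	/* The job takes over the parser's IBs and holds its own context
	 * reference until free_job runs from the scheduler. */
	job->adev = parser->adev;
	job->ibs = parser->ibs;
	job->num_ibs = parser->num_ibs;
	job->owner = parser->filp;
	job->ctx = amdgpu_ctx_get_ref(parser->ctx);
	job->free_job = amdgpu_cs_free_job;
	mutex_init(&job->job_lock);

	mutex_lock(&job->job_lock);
	r = amd_sched_push_job(&job->base);	/* the patch casts job instead */
	if (r) {
		mutex_unlock(&job->job_lock);
		amdgpu_cs_free_job(job);
		kfree(job);
		return r;
	}
	mutex_unlock(&job->job_lock);
	return 0;
}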
@@ -183,6 +183,7 @@ struct amdgpu_vm;
struct amdgpu_ring;
struct amdgpu_semaphore;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;

@@ -871,7 +872,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring,
					 struct amdgpu_ib *ibs,
					 unsigned num_ibs,
					 int (*free_job)(struct amdgpu_cs_parser *),
					 int (*free_job)(struct amdgpu_job *),
					 void *owner,
					 struct fence **fence);

@@ -1040,6 +1041,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
struct amdgpu_ctx *amdgpu_ctx_get_ref(struct amdgpu_ctx *ctx);

uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence, uint64_t queued_seq);
@@ -1265,6 +1267,18 @@ struct amdgpu_cs_parser {
	struct amd_sched_fence *s_fence;
};

struct amdgpu_job {
	struct amd_sched_job    base;
	struct amdgpu_device	*adev;
	struct amdgpu_ctx	*ctx;
	struct drm_file		*owner;
	struct amdgpu_ib	*ibs;
	uint32_t		num_ibs;
	struct mutex            job_lock;
	struct amdgpu_user_fence uf;
	int (*free_job)(struct amdgpu_job *sched_job);
};

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
{
	return p->ibs[ib_idx].ptr[idx];

@@ -126,19 +126,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
	return 0;
}

static void amdgpu_job_work_func(struct work_struct *work)
{
	struct amdgpu_cs_parser *sched_job =
		container_of(work, struct amdgpu_cs_parser,
			     job_work);
	mutex_lock(&sched_job->job_lock);
	if (sched_job->free_job)
		sched_job->free_job(sched_job);
	mutex_unlock(&sched_job->job_lock);
	/* after processing job, free memory */
	fence_put(&sched_job->s_fence->base);
	kfree(sched_job);
}
struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
                                               struct drm_file *filp,
                                               struct amdgpu_ctx *ctx,
@@ -157,10 +144,6 @@ struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
	parser->ctx = ctx;
	parser->ibs = ibs;
	parser->num_ibs = num_ibs;
	if (amdgpu_enable_scheduler) {
		mutex_init(&parser->job_lock);
		INIT_WORK(&parser->job_work, amdgpu_job_work_func);
	}
	for (i = 0; i < num_ibs; i++)
		ibs[i].ctx = ctx;

@@ -508,15 +491,17 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->ibs)
		for (i = 0; i < parser->num_ibs; i++)
			amdgpu_ib_free(parser->adev, &parser->ibs[i]);
	kfree(parser->ibs);
	if (parser->uf.bo)
		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);

	if (!amdgpu_enable_scheduler)
		kfree(parser);
	{
		if (parser->ibs)
			for (i = 0; i < parser->num_ibs; i++)
				amdgpu_ib_free(parser->adev, &parser->ibs[i]);
		kfree(parser->ibs);
		if (parser->uf.bo)
			drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
	}

	kfree(parser);
}

/**
@@ -533,12 +518,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
       amdgpu_cs_parser_fini_late(parser);
}

static int amdgpu_cs_parser_free_job(struct amdgpu_cs_parser *sched_job)
{
       amdgpu_cs_parser_fini_late(sched_job);
       return 0;
}

static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
				   struct amdgpu_vm *vm)
{
@@ -874,6 +853,19 @@ static struct amdgpu_ring *amdgpu_cs_parser_get_ring(
	return ring;
}

static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
{
	int i;
	amdgpu_ctx_put(sched_job->ctx);
	if (sched_job->ibs)
		for (i = 0; i < sched_job->num_ibs; i++)
			amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
	kfree(sched_job->ibs);
	if (sched_job->uf.bo)
		drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
	return 0;
}

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
@@ -900,33 +892,50 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
	}

	if (amdgpu_enable_scheduler && parser->num_ibs) {
		struct amdgpu_job *job;
		struct amdgpu_ring * ring =
			amdgpu_cs_parser_get_ring(adev, parser);
		r = amdgpu_cs_parser_prepare_job(parser);
		if (r)
			goto out;
		parser->ring = ring;
		parser->free_job = amdgpu_cs_parser_free_job;
		mutex_lock(&parser->job_lock);
		r = amd_sched_push_job(ring->scheduler,
				       &parser->ctx->rings[ring->idx].entity,
				       parser,
				       &parser->s_fence);
		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
		if (!job)
			return -ENOMEM;
		job->base.sched = ring->scheduler;
		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
		job->adev = parser->adev;
		job->ibs = parser->ibs;
		job->num_ibs = parser->num_ibs;
		job->owner = parser->filp;
		job->ctx = amdgpu_ctx_get_ref(parser->ctx);
		mutex_init(&job->job_lock);
		if (job->ibs[job->num_ibs - 1].user) {
			memcpy(&job->uf,  &parser->uf,
			       sizeof(struct amdgpu_user_fence));
			job->ibs[job->num_ibs - 1].user = &job->uf;
		}

		job->free_job = amdgpu_cs_free_job;
		mutex_lock(&job->job_lock);
		r = amd_sched_push_job((struct amd_sched_job *)job);
		if (r) {
			mutex_unlock(&parser->job_lock);
			mutex_unlock(&job->job_lock);
			amdgpu_cs_free_job(job);
			kfree(job);
			goto out;
		}
		parser->ibs[parser->num_ibs - 1].sequence =
			amdgpu_ctx_add_fence(parser->ctx, ring,
					     &parser->s_fence->base,
					     parser->s_fence->v_seq);
		cs->out.handle = parser->s_fence->v_seq;
		job->ibs[parser->num_ibs - 1].sequence =
			amdgpu_ctx_add_fence(job->ctx, ring,
					     &job->base.s_fence->base,
					     job->base.s_fence->v_seq);
		cs->out.handle = job->base.s_fence->v_seq;
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
		ttm_eu_fence_buffer_objects(&parser->ticket,
				&parser->validated,
				&parser->s_fence->base);
				&job->base.s_fence->base);

		mutex_unlock(&parser->job_lock);
		mutex_unlock(&job->job_lock);
		amdgpu_cs_parser_fini_late(parser);
		up_read(&adev->exclusive_lock);
		return 0;
	}

@@ -219,6 +219,13 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
	return ctx;
}

struct amdgpu_ctx *amdgpu_ctx_get_ref(struct amdgpu_ctx *ctx)
{
	if (ctx)
		kref_get(&ctx->refcount);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)

@@ -27,81 +27,58 @@
#include <drm/drmP.h>
#include "amdgpu.h"

static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
				    struct amd_sched_entity *entity,
				    struct amd_sched_job *job)
{
	int r = 0;
	struct amdgpu_cs_parser *sched_job;
	if (!job || !job->data) {
		DRM_ERROR("job is null\n");
		return -EINVAL;
	}

	sched_job = (struct amdgpu_cs_parser *)job->data;
	if (sched_job->prepare_job) {
		r = sched_job->prepare_job(sched_job);
		if (r) {
			DRM_ERROR("Prepare job error\n");
			schedule_work(&sched_job->job_work);
		}
	}
	return r;
}

static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
					  struct amd_sched_entity *entity,
					  struct amd_sched_job *job)
{
	int r = 0;
	struct amdgpu_cs_parser *sched_job;
	struct amdgpu_job *sched_job;
	struct amdgpu_fence *fence;

	if (!job || !job->data) {
	if (!job) {
		DRM_ERROR("job is null\n");
		return NULL;
	}
	sched_job = (struct amdgpu_cs_parser *)job->data;
	sched_job = (struct amdgpu_job *)job;
	mutex_lock(&sched_job->job_lock);
	r = amdgpu_ib_schedule(sched_job->adev,
			       sched_job->num_ibs,
			       sched_job->ibs,
			       sched_job->filp);
			       sched_job->owner);
	if (r)
		goto err;
	fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);

	if (sched_job->run_job) {
		r = sched_job->run_job(sched_job);
		if (r)
			goto err;
	}

	mutex_unlock(&sched_job->job_lock);
	return &fence->base;

err:
	DRM_ERROR("Run job error\n");
	mutex_unlock(&sched_job->job_lock);
	schedule_work(&sched_job->job_work);
	sched->ops->process_job(sched, (struct amd_sched_job *)sched_job);
	return NULL;
}

static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched,
				     struct amd_sched_job *job)
{
	struct amdgpu_cs_parser *sched_job;
	struct amdgpu_job *sched_job;

	if (!job || !job->data) {
	if (!job) {
		DRM_ERROR("job is null\n");
		return;
	}
	sched_job = (struct amdgpu_cs_parser *)job->data;
	schedule_work(&sched_job->job_work);
	sched_job = (struct amdgpu_job *)job;
	mutex_lock(&sched_job->job_lock);
	if (sched_job->free_job)
		sched_job->free_job(sched_job);
	mutex_unlock(&sched_job->job_lock);
	/* after processing job, free memory */
	fence_put(&sched_job->base.s_fence->base);
	kfree(sched_job);
}

struct amd_sched_backend_ops amdgpu_sched_ops = {
	.prepare_job = amdgpu_sched_prepare_job,
	.run_job = amdgpu_sched_run_job,
	.process_job = amdgpu_sched_process_job
};
@@ -110,31 +87,34 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring,
					 struct amdgpu_ib *ibs,
					 unsigned num_ibs,
					 int (*free_job)(struct amdgpu_cs_parser *),
					 int (*free_job)(struct amdgpu_job *),
					 void *owner,
					 struct fence **f)
{
	int r = 0;
	if (amdgpu_enable_scheduler) {
		struct amdgpu_cs_parser *sched_job =
			amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
						ibs, num_ibs);
		if(!sched_job) {
		struct amdgpu_job *job =
			kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
		if (!job)
			return -ENOMEM;
		}
		sched_job->free_job = free_job;
		mutex_lock(&sched_job->job_lock);
		r = amd_sched_push_job(ring->scheduler,
				       &adev->kernel_ctx.rings[ring->idx].entity,
				       sched_job, &sched_job->s_fence);
		job->base.sched = ring->scheduler;
		job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
		job->adev = adev;
		job->ibs = ibs;
		job->num_ibs = num_ibs;
		job->owner = owner;
		mutex_init(&job->job_lock);
		job->free_job = free_job;
		mutex_lock(&job->job_lock);
		r = amd_sched_push_job((struct amd_sched_job *)job);
		if (r) {
			mutex_unlock(&sched_job->job_lock);
			kfree(sched_job);
			mutex_unlock(&job->job_lock);
			kfree(job);
			return r;
		}
		ibs[num_ibs - 1].sequence = sched_job->s_fence->v_seq;
		*f = fence_get(&sched_job->s_fence->base);
		mutex_unlock(&sched_job->job_lock);
		ibs[num_ibs - 1].sequence = job->base.s_fence->v_seq;
		*f = fence_get(&job->base.s_fence->base);
		mutex_unlock(&job->job_lock);
	} else {
		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
		if (r)

@@ -807,7 +807,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
}

static int amdgpu_uvd_free_job(
	struct amdgpu_cs_parser *sched_job)
	struct amdgpu_job *sched_job)
{
	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
	kfree(sched_job->ibs);

@@ -340,7 +340,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
}

static int amdgpu_vce_free_job(
	struct amdgpu_cs_parser *sched_job)
	struct amdgpu_job *sched_job)
{
	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
	kfree(sched_job->ibs);

@@ -307,7 +307,7 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
}

static int amdgpu_vm_free_job(
	struct amdgpu_cs_parser *sched_job)
	struct amdgpu_job *sched_job)
{
	int i;
	for (i = 0; i < sched_job->num_ibs; i++)

@@ -282,30 +282,18 @@ int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 *	     scheduler consum some queued command.
 *	  -1 other fail.
*/
int amd_sched_push_job(struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *c_entity,
		       void *data,
		       struct amd_sched_fence **fence)
int amd_sched_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_job *job;

	struct amd_sched_fence 	*fence =
		amd_sched_fence_create(sched_job->s_entity);
	if (!fence)
		return -EINVAL;
	job = kzalloc(sizeof(struct amd_sched_job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;
	job->sched = sched;
	job->s_entity = c_entity;
	job->data = data;
	*fence = amd_sched_fence_create(c_entity);
	if ((*fence) == NULL) {
		kfree(job);
		return -EINVAL;
	}
	fence_get(&(*fence)->base);
	job->s_fence = *fence;
	while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
				   &c_entity->queue_lock) != sizeof(void *)) {
	fence_get(&fence->base);
	sched_job->s_fence = fence;
	while (kfifo_in_spinlocked(&sched_job->s_entity->job_queue,
				   &sched_job, sizeof(void *),
				   &sched_job->s_entity->queue_lock) !=
	       sizeof(void *)) {
		/**
		 * Current context used up all its IB slots
		 * wait here, or need to check whether GPU is hung
@@ -313,8 +301,8 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
		schedule();
	}
	/* first job wake up scheduler */
	if ((kfifo_len(&c_entity->job_queue) / sizeof(void *)) == 1)
		wake_up_interruptible(&sched->wait_queue);
	if ((kfifo_len(&sched_job->s_entity->job_queue) / sizeof(void *)) == 1)
		wake_up_interruptible(&sched_job->sched->wait_queue);
	return 0;
}

@@ -333,10 +321,8 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
	list_del(&sched_job->list);
	atomic64_dec(&sched->hw_rq_count);
	spin_unlock_irqrestore(&sched->queue_lock, flags);

	sched->ops->process_job(sched, sched_job);
	fence_put(&sched_job->s_fence->base);
	kfree(sched_job);
	sched->ops->process_job(sched, sched_job);
	wake_up_interruptible(&sched->wait_queue);
}

@@ -359,7 +345,9 @@ static int amd_sched_main(void *param)
		r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
		if (r != sizeof(void *))
			continue;
		r = sched->ops->prepare_job(sched, c_entity, job);
		r = 0;
		if (sched->ops->prepare_job)
			r = sched->ops->prepare_job(sched, c_entity, job);
		if (!r) {
			unsigned long flags;
			spin_lock_irqsave(&sched->queue_lock, flags);

@@ -81,7 +81,6 @@ struct amd_sched_job {
	struct fence_cb                 cb;
	struct amd_gpu_scheduler        *sched;
	struct amd_sched_entity         *s_entity;
	void                            *data;
	struct amd_sched_fence          *s_fence;
};

@@ -140,10 +139,7 @@ struct amd_gpu_scheduler *amd_sched_create(void *device,
				uint32_t hw_submission);
int amd_sched_destroy(struct amd_gpu_scheduler *sched);

int amd_sched_push_job(struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *c_entity,
		       void *data,
		       struct amd_sched_fence **fence);
int amd_sched_push_job(struct amd_sched_job *sched_job);

int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,

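Usage note: internal submitters (UVD, VCE, VM updates) now hand amdgpu_sched_ib_submit_kernel_helper() a free_job callback that receives the amdgpu_job rather than an amdgpu_cs_parser. A hypothetical caller might look like the sketch below; the example_* names are invented for illustration, while the helper, field and function names come from the patch, and IB setup plus waiting on the returned fence are elided.

/* Hypothetical caller of the updated kernel-helper path; "example_" names
 * are invented, everything else is taken from the patch above. */
#include "amdgpu.h"

static int example_free_job(struct amdgpu_job *sched_job)
{
	int i;

	/* Mirror amdgpu_vm_free_job(): release the IBs the job owned. */
	for (i = 0; i < sched_job->num_ibs; i++)
		amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
	kfree(sched_job->ibs);
	return 0;
}

static int example_submit(struct amdgpu_device *adev,
			  struct amdgpu_ring *ring,
			  struct amdgpu_ib *ibs, unsigned num_ibs,
			  void *owner)
{
	struct fence *f = NULL;
	int r;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ibs, num_ibs,
						 example_free_job,
						 owner, &f);
	if (r)
		return r;

	/* The helper hands back a fence reference (for the scheduler path,
	 * the scheduler fence); drop it when the caller is done with it. */
	fence_put(f);
	return 0;
}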