c530b02f39
Why:
Previously the hw fence was allocated separately from the job. This caused long-standing lifetime issues and corner cases. The ideal situation is to let the fence manage the lifetime of both the job and the fence, and to simplify the design of the gpu scheduler.

How:
Embed the hw_fence into amdgpu_job.
1. Normal job submission is covered by this method.
2. For ib_test, and for submissions without a parent job, keep the legacy way of creating a hw fence separately.

v2: use AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT to show that the fence is embedded in a job.
v3: remove the redundant ring variable in amdgpu_job.
v4: add TDR sequence support for this feature; add a job_run_counter to indicate whether a job is a resubmit job.
v5: add missing handling in amdgpu_fence_enable_signaling.

Signed-off-by: Jingwen Chen <Jingwen.Chen2@amd.com>
Signed-off-by: Jack Zhang <Jack.Zhang7@hotmail.com>
Reviewed-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed by: Monk Liu <monk.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
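To illustrate the lifetime model the commit describes, the sketch below shows how an embedded hw_fence lets the owning job be recovered with container_of, and how job_run_counter distinguishes a first run from a TDR resubmit. This is a minimal sketch against the struct declared in this header: the helper name amdgpu_fence_to_job() and the example_run() function are illustrative assumptions, not code from the patch.

/*
 * Hypothetical sketch: with hw_fence embedded in struct amdgpu_job, the job
 * can be found from its fence and the two share one reference-counted
 * lifetime. Assumes the definitions below plus <linux/dma-fence.h>.
 */
static inline struct amdgpu_job *amdgpu_fence_to_job(struct dma_fence *f)
{
	/* only valid when the fence was created embedded in a job */
	return container_of(f, struct amdgpu_job, hw_fence);
}

static void example_run(struct amdgpu_job *job)
{
	/* first submission vs. TDR resubmission, per the commit message */
	bool is_resubmit = job->job_run_counter >= 1;

	/* holding a reference on the embedded fence keeps the job alive too */
	dma_fence_get(&job->hw_fence);
	/* ... hand the fence to the scheduler or other waiters ... */
	dma_fence_put(&job->hw_fence);

	(void)is_resubmit;
}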
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef __AMDGPU_JOB_H__
#define __AMDGPU_JOB_H__

/* bit set means command submit involves a preamble IB */
#define AMDGPU_PREAMBLE_IB_PRESENT		(1 << 0)
/* bit set means preamble IB is first presented in belonging context */
#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST	(1 << 1)
/* bit set means context switch occurred */
#define AMDGPU_HAVE_CTX_SWITCH			(1 << 2)
/* bit set means IB is preempted */
#define AMDGPU_IB_PREEMPTED			(1 << 3)

#define to_amdgpu_job(sched_job)		\
		container_of((sched_job), struct amdgpu_job, base)

#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)

struct amdgpu_fence;
enum amdgpu_ib_pool_type;

struct amdgpu_job {
	struct drm_sched_job	base;
	struct amdgpu_vm	*vm;
	struct amdgpu_sync	sync;
	struct amdgpu_sync	sched_sync;
	struct amdgpu_ib	*ibs;
	struct dma_fence	hw_fence;
	struct dma_fence	*external_hw_fence;
	uint32_t		preamble_status;
	uint32_t		preemption_status;
	uint32_t		num_ibs;
	bool			vm_needs_flush;
	uint64_t		vm_pd_addr;
	unsigned		vmid;
	unsigned		pasid;
	uint32_t		gds_base, gds_size;
	uint32_t		gws_base, gws_size;
	uint32_t		oa_base, oa_size;
	uint32_t		vram_lost_counter;

	/* user fence handling */
	uint64_t		uf_addr;
	uint64_t		uf_sequence;

	/* job_run_counter >= 1 means a resubmit job */
	uint32_t		job_run_counter;
};

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     enum amdgpu_ib_pool_type pool,
			     struct amdgpu_job **job);
void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f);
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence);

void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched);

#endif
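For context on how the entry points declared above are typically chained, here is a rough, hedged sketch of an alloc-and-submit flow. The caller-provided adev, entity and owner, the IB size of 64, and the AMDGPU_IB_POOL_DELAYED pool value are assumptions for illustration only; error handling follows the usual convention that a job is freed by the caller only if it was never handed to the scheduler.

/* Hedged sketch of a typical alloc -> submit flow using the prototypes above. */
static int example_submit(struct amdgpu_device *adev,
			  struct drm_sched_entity *entity,
			  void *owner)
{
	struct amdgpu_job *job;
	struct dma_fence *fence;
	int r;

	r = amdgpu_job_alloc_with_ib(adev, 64, AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	/* ... fill job->ibs[0] with commands here ... */

	r = amdgpu_job_submit(job, entity, owner, &fence);
	if (r) {
		amdgpu_job_free(job);	/* job was never handed to the scheduler */
		return r;
	}

	dma_fence_put(fence);		/* drop our reference; the scheduler owns the job now */
	return 0;
}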