forked from Minki/linux
drm/amdgpu: prepare job before push to sw queue for pte ring
User mode will still use the PTE ring as a normal ring. If the prepare job generates another command (a PTE update) on its ring in the scheduler, it will hang the scheduler, which ends up waiting on a later job while an earlier job is still pending. Signed-off-by: Chunming Zhou <david1.zhou@amd.com> Acked-by: Christian König <christian.koenig@amd.com> Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
This commit is contained in:
parent
4b559c90bc
commit
4274f5d45c
@@ -902,6 +902,7 @@ struct amdgpu_ring {
|
|||||||
struct amdgpu_ctx *current_ctx;
|
struct amdgpu_ctx *current_ctx;
|
||||||
enum amdgpu_ring_type type;
|
enum amdgpu_ring_type type;
|
||||||
char name[16];
|
char name[16];
|
||||||
|
bool is_pte_ring;
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@@ -909,7 +909,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
|||||||
amdgpu_cs_parser_get_ring(adev, parser);
|
amdgpu_cs_parser_get_ring(adev, parser);
|
||||||
parser->uf.sequence = atomic64_inc_return(
|
parser->uf.sequence = atomic64_inc_return(
|
||||||
&parser->ctx->rings[ring->idx].c_entity.last_queued_v_seq);
|
&parser->ctx->rings[ring->idx].c_entity.last_queued_v_seq);
|
||||||
if ((parser->bo_list && parser->bo_list->has_userptr)) {
|
if (ring->is_pte_ring || (parser->bo_list && parser->bo_list->has_userptr)) {
|
||||||
r = amdgpu_cs_parser_prepare_job(parser);
|
r = amdgpu_cs_parser_prepare_job(parser);
|
||||||
if (r)
|
if (r)
|
||||||
goto out;
|
goto out;
|
||||||
|
@@ -1404,5 +1404,6 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
|
|||||||
if (adev->vm_manager.vm_pte_funcs == NULL) {
|
if (adev->vm_manager.vm_pte_funcs == NULL) {
|
||||||
adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
|
adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
|
||||||
adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
|
adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
|
||||||
|
adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -1415,5 +1415,6 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
|
|||||||
if (adev->vm_manager.vm_pte_funcs == NULL) {
|
if (adev->vm_manager.vm_pte_funcs == NULL) {
|
||||||
adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
|
adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
|
||||||
adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
|
adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
|
||||||
|
adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -1540,5 +1540,6 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
|
|||||||
if (adev->vm_manager.vm_pte_funcs == NULL) {
|
if (adev->vm_manager.vm_pte_funcs == NULL) {
|
||||||
adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
|
adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
|
||||||
adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
|
adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
|
||||||
|
adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user