drm/sched: move fence slab handling to module init/exit
This is the only part of the scheduler which must not be called from
different drivers. Move it to module init/exit so it is done a single
time when loading the scheduler.

Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Tested-by: Dieter Nützel <Dieter@nuetzel-hh.de>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 4983e48c85 (parent 1b1f42d8fd)
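For readers less familiar with the pattern this patch moves to, the following is a minimal standalone sketch of tying a kmem_cache's lifetime to module init/exit instead of to each driver's own init path. The module and slab names (example_obj, example_slab) are hypothetical and simplified; the scheduler's actual code is in the diff below.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

/* Hypothetical object type; stands in for the real cached structure. */
struct example_obj {
	int payload;
};

static struct kmem_cache *example_slab;

/* Runs exactly once when the module is loaded, regardless of how many
 * drivers later use the cache. */
static int __init example_slab_init(void)
{
	example_slab = kmem_cache_create("example_obj",
					 sizeof(struct example_obj), 0,
					 SLAB_HWCACHE_ALIGN, NULL);
	if (!example_slab)
		return -ENOMEM;

	return 0;
}

/* Runs exactly once at module unload. rcu_barrier() is only needed when
 * objects from the cache are freed via RCU, as the scheduler fences are. */
static void __exit example_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(example_slab);
}

module_init(example_slab_init);
module_exit(example_slab_fini);

MODULE_DESCRIPTION("Example: slab lifetime tied to module init/exit");
MODULE_LICENSE("GPL");

Because the init/exit hooks are static and marked __init/__exit, no other module can (or needs to) call them, which is exactly the property the commit message relies on.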
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -912,10 +912,6 @@ static int __init amdgpu_init(void)
 	if (r)
 		goto error_fence;
 
-	r = drm_sched_fence_slab_init();
-	if (r)
-		goto error_sched;
-
 	if (vgacon_text_force()) {
 		DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
 		return -EINVAL;
@@ -928,9 +924,6 @@ static int __init amdgpu_init(void)
 	/* let modprobe override vga console setting */
 	return pci_register_driver(pdriver);
 
-error_sched:
-	amdgpu_fence_slab_fini();
-
 error_fence:
 	amdgpu_sync_fini();
 
@@ -944,7 +937,6 @@ static void __exit amdgpu_exit(void)
 	pci_unregister_driver(pdriver);
 	amdgpu_unregister_atpx_handler();
 	amdgpu_sync_fini();
-	drm_sched_fence_slab_fini();
 	amdgpu_fence_slab_fini();
 }
 
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -29,7 +29,7 @@
 
 static struct kmem_cache *sched_fence_slab;
 
-int drm_sched_fence_slab_init(void)
+static int __init drm_sched_fence_slab_init(void)
 {
 	sched_fence_slab = kmem_cache_create(
 		"drm_sched_fence", sizeof(struct drm_sched_fence), 0,
@@ -39,14 +39,12 @@ int drm_sched_fence_slab_init(void)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(drm_sched_fence_slab_init);
 
-void drm_sched_fence_slab_fini(void)
+static void __exit drm_sched_fence_slab_fini(void)
 {
 	rcu_barrier();
 	kmem_cache_destroy(sched_fence_slab);
 }
-EXPORT_SYMBOL_GPL(drm_sched_fence_slab_fini);
 
 void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
 {
@@ -185,3 +183,9 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
 
 	return fence;
 }
+
+module_init(drm_sched_fence_slab_init);
+module_exit(drm_sched_fence_slab_fini);
+
+MODULE_DESCRIPTION("DRM GPU scheduler");
+MODULE_LICENSE("GPL and additional rights");
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -155,9 +155,6 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
 			     struct drm_sched_rq *rq);
 
-int drm_sched_fence_slab_init(void);
-void drm_sched_fence_slab_fini(void);
-
 struct drm_sched_fence *drm_sched_fence_create(
 	struct drm_sched_entity *s_entity, void *owner);
 void drm_sched_fence_scheduled(struct drm_sched_fence *fence);