drm/amdgpu: All UVD instances share one idle_work handle
All UVD instances have only one dpm control, so it is better to share one idle_work handle.

Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Tested-by: Stefan Agner <stefan@agner.ch>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 5c53d19b76
parent d9fda24804
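For context, the pattern this patch converges on is the kernel's usual delayed-work idle handling: a single handle kept in the shared parent structure, cancelled when a ring is picked up and re-armed when it is released. Below is a minimal sketch of that shape; the `my_*` names, the timeout value, and the `active_jobs` counter are illustrative stand-ins, not the driver's own code.

```c
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define MY_IDLE_TIMEOUT	msecs_to_jiffies(1000)	/* illustrative value */

struct my_dev {
	/* one shared handle, no matter how many HW instances exist */
	struct delayed_work	idle_work;
	atomic_t		active_jobs;
};

static void my_idle_work_handler(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, idle_work.work);

	if (atomic_read(&dev->active_jobs) == 0) {
		/* device is idle: gate power / drop clocks here */
	} else {
		/* still busy: check again after another timeout */
		schedule_delayed_work(&dev->idle_work, MY_IDLE_TIMEOUT);
	}
}

static void my_init(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->idle_work, my_idle_work_handler);
	atomic_set(&dev->active_jobs, 0);
}

static void my_begin_use(struct my_dev *dev)
{
	/*
	 * If no work was pending, the idle handler already ran (or was
	 * never armed), so clocks have to be raised again.
	 */
	bool set_clocks = !cancel_delayed_work_sync(&dev->idle_work);

	atomic_inc(&dev->active_jobs);
	if (set_clocks) {
		/* raise clocks / ungate power here */
	}
}

static void my_end_use(struct my_dev *dev)
{
	atomic_dec(&dev->active_jobs);
	schedule_delayed_work(&dev->idle_work, MY_IDLE_TIMEOUT);
}
```

Since all UVD instances sit behind one DPM control, one timer is sufficient: whichever ring finishes last re-arms it, and the handler gates the whole block at once.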
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	unsigned version_major, version_minor, family_id;
 	int i, j, r;
 
-	INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
+	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
 
 	switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -314,12 +314,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 	void *ptr;
 	int i, j;
 
+	cancel_delayed_work_sync(&adev->uvd.idle_work);
+
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		if (adev->uvd.inst[j].vcpu_bo == NULL)
 			continue;
 
-		cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
-
 		/* only valid for physical mode */
 		if (adev->asic_type < CHIP_POLARIS10) {
 			for (i = 0; i < adev->uvd.max_handles; ++i)
@@ -1145,7 +1145,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
 	struct amdgpu_device *adev =
-		container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+		container_of(work, struct amdgpu_device, uvd.idle_work.work);
 	unsigned fences = 0, i, j;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
@@ -1167,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 							       AMD_CG_STATE_GATE);
 		}
 	} else {
-		schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 	}
 }
 
@@ -1179,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 	if (amdgpu_sriov_vf(adev))
 		return;
 
-	set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+	set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
 	if (set_clocks) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_uvd(adev, true);
@@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
 	if (!amdgpu_sriov_vf(ring->adev))
-		schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 }
 
 /**
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -44,7 +44,6 @@ struct amdgpu_uvd_inst {
 	void			*saved_bo;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
-	struct delayed_work	idle_work;
 	struct amdgpu_ring	ring;
 	struct amdgpu_ring	ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
 	struct amdgpu_irq_src	irq;
@@ -62,6 +61,7 @@ struct amdgpu_uvd {
 	bool			address_64_bit;
 	bool			use_ctx_buf;
 	struct amdgpu_uvd_inst		inst[AMDGPU_MAX_UVD_INSTANCES];
+	struct delayed_work	idle_work;
};
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
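The suspend hunk shows the other half of the change: once the handle lives in the shared `struct amdgpu_uvd` rather than in each `struct amdgpu_uvd_inst`, the cancel must be hoisted out of the per-instance loop and done exactly once. A minimal sketch of that shape, reusing the illustrative `my_*` names from above (again not the driver's own code):

```c
#include <linux/workqueue.h>

struct my_inst {
	void			*vcpu_bo;	/* per-instance state (illustrative) */
};

struct my_dev {
	struct delayed_work	idle_work;	/* shared, as in the patch */
	unsigned		num_inst;
	struct my_inst		inst[4];
};

static int my_suspend(struct my_dev *dev)
{
	unsigned j;

	/* one shared handle: cancel exactly once, outside the loop */
	cancel_delayed_work_sync(&dev->idle_work);

	for (j = 0; j < dev->num_inst; ++j) {
		if (dev->inst[j].vcpu_bo == NULL)
			continue;
		/* per-instance save/teardown stays inside the loop */
	}
	return 0;
}
```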