drm/ttm: remove lazy parameter from ttm_bo_wait
Not used any more.

Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 8aa6d4fc5f
parent dfd5e50ea4
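The conversion for callers is purely mechanical: drop the now-unused lazy flag, the second parameter of ttm_bo_wait(). A minimal before/after sketch (the surrounding variable names are illustrative, not taken from any one hunk below):

	/* Before this patch: the lazy flag was accepted but no longer used. */
	ret = ttm_bo_wait(bo, true, interruptible, no_wait_gpu);

	/* After this patch: only the interruptible and no_wait flags remain. */
	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);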
@@ -1322,7 +1322,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	}
 
 	/* Fallback to software copy. */
-	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
+	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
 	if (ret == 0)
 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 

@@ -126,7 +126,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
 	list_del(&vma->head);
 
 	if (fobj && fobj->shared_count > 1)
-		ttm_bo_wait(&nvbo->bo, true, false, false);
+		ttm_bo_wait(&nvbo->bo, false, false);
 	else if (fobj && fobj->shared_count == 1)
 		fence = rcu_dereference_protected(fobj->shared[0],
 						reservation_object_held(resv));
@@ -651,7 +651,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 				data |= r->vor;
 		}
 
-		ret = ttm_bo_wait(&nvbo->bo, true, false, false);
+		ret = ttm_bo_wait(&nvbo->bo, false, false);
 		if (ret) {
 			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
 			break;

@@ -624,7 +624,7 @@ static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stal
 	if (stall)
 		mutex_unlock(&qdev->surf_evict_mutex);
 
-	ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
+	ret = ttm_bo_wait(&surf->tbo, true, !stall);
 
 	if (stall)
 		mutex_lock(&qdev->surf_evict_mutex);

@@ -79,7 +79,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
 	if (mem_type)
 		*mem_type = bo->tbo.mem.mem_type;
 
-	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+	r = ttm_bo_wait(&bo->tbo, true, no_wait);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
 }

@@ -838,7 +838,7 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
 	if (mem_type)
 		*mem_type = bo->tbo.mem.mem_type;
 
-	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+	r = ttm_bo_wait(&bo->tbo, true, no_wait);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
 }

@@ -455,7 +455,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	ret = __ttm_bo_reserve(bo, false, true, NULL);
 
 	if (!ret) {
-		if (!ttm_bo_wait(bo, false, false, true)) {
+		if (!ttm_bo_wait(bo, false, true)) {
 			put_count = ttm_bo_del_from_lru(bo);
 
 			spin_unlock(&glob->lru_lock);
@@ -508,7 +508,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 	int put_count;
 	int ret;
 
-	ret = ttm_bo_wait(bo, false, false, true);
+	ret = ttm_bo_wait(bo, false, true);
 
 	if (ret && !no_wait_gpu) {
 		long lret;
@@ -545,7 +545,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 		 * remove sync_obj with ttm_bo_wait, the wait should be
 		 * finished, and no new wait object should have been added.
 		 */
-		ret = ttm_bo_wait(bo, false, false, true);
+		ret = ttm_bo_wait(bo, false, true);
 		WARN_ON(ret);
 	}
 
@@ -684,7 +684,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	struct ttm_placement placement;
 	int ret = 0;
 
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
 
 	if (unlikely(ret != 0)) {
 		if (ret != -ERESTARTSYS) {
@@ -1006,7 +1006,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 		 * Have the driver move function wait for idle when necessary,
 		 * instead of doing it here.
 		 */
-		ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+		ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
 		if (ret)
 			return ret;
 	}
@@ -1567,7 +1567,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 int ttm_bo_wait(struct ttm_buffer_object *bo,
-		bool lazy, bool interruptible, bool no_wait)
+		bool interruptible, bool no_wait)
 {
 	struct reservation_object_list *fobj;
 	struct reservation_object *resv;
@@ -1625,7 +1625,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
 	if (unlikely(ret != 0))
 		return ret;
-	ret = ttm_bo_wait(bo, false, true, no_wait);
+	ret = ttm_bo_wait(bo, true, no_wait);
 	if (likely(ret == 0))
 		atomic_inc(&bo->cpu_writers);
 	ttm_bo_unreserve(bo);
@@ -1682,7 +1682,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	 * Wait for GPU, then move to system cached.
 	 */
 
-	ret = ttm_bo_wait(bo, false, false, false);
+	ret = ttm_bo_wait(bo, false, false);
 
 	if (unlikely(ret != 0))
 		goto out;

@@ -645,7 +645,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 
 	reservation_object_add_excl_fence(bo->resv, fence);
 	if (evict) {
-		ret = ttm_bo_wait(bo, false, false, false);
+		ret = ttm_bo_wait(bo, false, false);
 		if (ret)
 			return ret;
 

@@ -54,7 +54,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 	/*
 	 * Quick non-stalling check for idle.
 	 */
-	ret = ttm_bo_wait(bo, false, false, true);
+	ret = ttm_bo_wait(bo, false, true);
 	if (likely(ret == 0))
 		goto out_unlock;
 
@@ -68,14 +68,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 			goto out_unlock;
 
 		up_read(&vma->vm_mm->mmap_sem);
-		(void) ttm_bo_wait(bo, false, true, false);
+		(void) ttm_bo_wait(bo, true, false);
 		goto out_unlock;
 	}
 
 	/*
 	 * Ordinary wait.
 	 */
-	ret = ttm_bo_wait(bo, false, true, false);
+	ret = ttm_bo_wait(bo, true, false);
 	if (unlikely(ret != 0))
 		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
 			VM_FAULT_NOPAGE;

@@ -158,7 +158,7 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
 	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
 	if (unlikely(r != 0))
 		return r;
-	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+	r = ttm_bo_wait(&bo->tbo, true, no_wait);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
 }

@@ -839,7 +839,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
  */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
 {
-	ttm_bo_wait(bo, false, false, false);
+	ttm_bo_wait(bo, false, false);
 }
 
 

@@ -423,7 +423,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	bo = &buf->base;
 	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));
 
-	ret = ttm_bo_wait(old_bo, false, false, false);
+	ret = ttm_bo_wait(old_bo, false, false);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed waiting for cotable unbind.\n");
 		goto out_wait;

@@ -1512,7 +1512,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 			list_del_init(&res->mob_head);
 		}
 
-		(void) ttm_bo_wait(bo, false, false, false);
+		(void) ttm_bo_wait(bo, false, false);
 	}
 }
 
@@ -1605,7 +1605,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
 		if (fence != NULL)
 			vmw_fence_obj_unreference(&fence);
 
-		(void) ttm_bo_wait(bo, false, false, false);
+		(void) ttm_bo_wait(bo, false, false);
 	} else
 		mutex_unlock(&dev_priv->binding_mutex);
 

@@ -314,7 +314,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
  * Returns -EBUSY if no_wait is true and the buffer is busy.
  * Returns -ERESTARTSYS if interrupted by a signal.
  */
-extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
+extern int ttm_bo_wait(struct ttm_buffer_object *bo,
 		       bool interruptible, bool no_wait);
/**
 * ttm_bo_validate