drm/i915: Remove the defunct flushing list
As we guarantee to emit a flush before emitting the breadcrumb or the next batchbuffer, there is no further need for the flushing list. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
This commit is contained in:
		
							parent
							
								
									0201f1ecf4
								
							
						
					
					
						commit
						65ce302741
					
				| @ -44,7 +44,6 @@ | ||||
| 
 | ||||
| enum { | ||||
| 	ACTIVE_LIST, | ||||
| 	FLUSHING_LIST, | ||||
| 	INACTIVE_LIST, | ||||
| 	PINNED_LIST, | ||||
| }; | ||||
| @ -178,10 +177,6 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | ||||
| 		seq_printf(m, "Inactive:\n"); | ||||
| 		head = &dev_priv->mm.inactive_list; | ||||
| 		break; | ||||
| 	case FLUSHING_LIST: | ||||
| 		seq_printf(m, "Flushing:\n"); | ||||
| 		head = &dev_priv->mm.flushing_list; | ||||
| 		break; | ||||
| 	default: | ||||
| 		mutex_unlock(&dev->struct_mutex); | ||||
| 		return -EINVAL; | ||||
| @ -239,7 +234,6 @@ static int i915_gem_object_info(struct seq_file *m, void* data) | ||||
| 
 | ||||
| 	size = count = mappable_size = mappable_count = 0; | ||||
| 	count_objects(&dev_priv->mm.active_list, mm_list); | ||||
| 	count_objects(&dev_priv->mm.flushing_list, mm_list); | ||||
| 	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n", | ||||
| 		   count, mappable_count, size, mappable_size); | ||||
| 
 | ||||
| @ -2007,7 +2001,6 @@ static struct drm_info_list i915_debugfs_list[] = { | ||||
| 	{"i915_gem_gtt", i915_gem_gtt_info, 0}, | ||||
| 	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, | ||||
| 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, | ||||
| 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, | ||||
| 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, | ||||
| 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0}, | ||||
| 	{"i915_gem_request", i915_gem_request_info, 0}, | ||||
|  | ||||
| @ -695,17 +695,6 @@ typedef struct drm_i915_private { | ||||
| 		 */ | ||||
| 		struct list_head active_list; | ||||
| 
 | ||||
| 		/**
 | ||||
| 		 * List of objects which are not in the ringbuffer but which | ||||
| 		 * still have a write_domain which needs to be flushed before | ||||
| 		 * unbinding. | ||||
| 		 * | ||||
| 		 * last_rendering_seqno is 0 while an object is in this list. | ||||
| 		 * | ||||
| 		 * A reference is held on the buffer while on this list. | ||||
| 		 */ | ||||
| 		struct list_head flushing_list; | ||||
| 
 | ||||
| 		/**
 | ||||
| 		 * LRU list of objects which are not in the ringbuffer and | ||||
| 		 * are ready to unbind, but are still in the GTT. | ||||
| @ -873,7 +862,7 @@ struct drm_i915_gem_object { | ||||
| 	struct drm_mm_node *gtt_space; | ||||
| 	struct list_head gtt_list; | ||||
| 
 | ||||
| 	/** This object's place on the active/flushing/inactive lists */ | ||||
| 	/** This object's place on the active/inactive lists */ | ||||
| 	struct list_head ring_list; | ||||
| 	struct list_head mm_list; | ||||
| 	/** This object's place on GPU write list */ | ||||
| @ -882,9 +871,9 @@ struct drm_i915_gem_object { | ||||
| 	struct list_head exec_list; | ||||
| 
 | ||||
| 	/**
 | ||||
| 	 * This is set if the object is on the active or flushing lists | ||||
| 	 * (has pending rendering), and is not set if it's on inactive (ready | ||||
| 	 * to be unbound). | ||||
| 	 * This is set if the object is on the active lists (has pending | ||||
| 	 * rendering and so a non-zero seqno), and is not set if it is on | ||||
| 	 * inactive (ready to be unbound) list. | ||||
| 	 */ | ||||
| 	unsigned int active:1; | ||||
| 
 | ||||
|  | ||||
| @ -1457,27 +1457,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static void | ||||
| i915_gem_object_move_off_active(struct drm_i915_gem_object *obj) | ||||
| { | ||||
| 	list_del_init(&obj->ring_list); | ||||
| 	obj->last_read_seqno = 0; | ||||
| 	obj->last_write_seqno = 0; | ||||
| 	obj->last_fenced_seqno = 0; | ||||
| } | ||||
| 
 | ||||
| static void | ||||
| i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj) | ||||
| { | ||||
| 	struct drm_device *dev = obj->base.dev; | ||||
| 	drm_i915_private_t *dev_priv = dev->dev_private; | ||||
| 
 | ||||
| 	BUG_ON(!obj->active); | ||||
| 	list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list); | ||||
| 
 | ||||
| 	i915_gem_object_move_off_active(obj); | ||||
| } | ||||
| 
 | ||||
| static void | ||||
| i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) | ||||
| { | ||||
| @ -1487,10 +1466,17 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) | ||||
| 	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); | ||||
| 
 | ||||
| 	BUG_ON(!list_empty(&obj->gpu_write_list)); | ||||
| 	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); | ||||
| 	BUG_ON(!obj->active); | ||||
| 
 | ||||
| 	list_del_init(&obj->ring_list); | ||||
| 	obj->ring = NULL; | ||||
| 
 | ||||
| 	i915_gem_object_move_off_active(obj); | ||||
| 	obj->last_read_seqno = 0; | ||||
| 	obj->last_write_seqno = 0; | ||||
| 	obj->base.write_domain = 0; | ||||
| 
 | ||||
| 	obj->last_fenced_seqno = 0; | ||||
| 	obj->fenced_gpu_access = false; | ||||
| 
 | ||||
| 	obj->active = 0; | ||||
| @ -1694,7 +1680,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, | ||||
| 				       struct drm_i915_gem_object, | ||||
| 				       ring_list); | ||||
| 
 | ||||
| 		obj->base.write_domain = 0; | ||||
| 		list_del_init(&obj->gpu_write_list); | ||||
| 		i915_gem_object_move_to_inactive(obj); | ||||
| 	} | ||||
| @ -1731,20 +1716,6 @@ void i915_gem_reset(struct drm_device *dev) | ||||
| 	for_each_ring(ring, dev_priv, i) | ||||
| 		i915_gem_reset_ring_lists(dev_priv, ring); | ||||
| 
 | ||||
| 	/* Remove anything from the flushing lists. The GPU cache is likely
 | ||||
| 	 * to be lost on reset along with the data, so simply move the | ||||
| 	 * lost bo to the inactive list. | ||||
| 	 */ | ||||
| 	while (!list_empty(&dev_priv->mm.flushing_list)) { | ||||
| 		obj = list_first_entry(&dev_priv->mm.flushing_list, | ||||
| 				      struct drm_i915_gem_object, | ||||
| 				      mm_list); | ||||
| 
 | ||||
| 		obj->base.write_domain = 0; | ||||
| 		list_del_init(&obj->gpu_write_list); | ||||
| 		i915_gem_object_move_to_inactive(obj); | ||||
| 	} | ||||
| 
 | ||||
| 	/* Move everything out of the GPU domains to ensure we do any
 | ||||
| 	 * necessary invalidation upon reuse. | ||||
| 	 */ | ||||
| @ -1815,10 +1786,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) | ||||
| 		if (!i915_seqno_passed(seqno, obj->last_read_seqno)) | ||||
| 			break; | ||||
| 
 | ||||
| 		if (obj->base.write_domain != 0) | ||||
| 			i915_gem_object_move_to_flushing(obj); | ||||
| 		else | ||||
| 			i915_gem_object_move_to_inactive(obj); | ||||
| 		i915_gem_object_move_to_inactive(obj); | ||||
| 	} | ||||
| 
 | ||||
| 	if (unlikely(ring->trace_irq_seqno && | ||||
| @ -3897,7 +3865,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | ||||
| 	} | ||||
| 
 | ||||
| 	BUG_ON(!list_empty(&dev_priv->mm.active_list)); | ||||
| 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||||
| 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); | ||||
| 	mutex_unlock(&dev->struct_mutex); | ||||
| 
 | ||||
| @ -3955,7 +3922,6 @@ i915_gem_load(struct drm_device *dev) | ||||
| 	drm_i915_private_t *dev_priv = dev->dev_private; | ||||
| 
 | ||||
| 	INIT_LIST_HEAD(&dev_priv->mm.active_list); | ||||
| 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list); | ||||
| 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | ||||
| 	INIT_LIST_HEAD(&dev_priv->mm.fence_list); | ||||
| 	INIT_LIST_HEAD(&dev_priv->mm.gtt_list); | ||||
| @ -4206,12 +4172,7 @@ static int | ||||
| i915_gpu_is_active(struct drm_device *dev) | ||||
| { | ||||
| 	drm_i915_private_t *dev_priv = dev->dev_private; | ||||
| 	int lists_empty; | ||||
| 
 | ||||
| 	lists_empty = list_empty(&dev_priv->mm.flushing_list) && | ||||
| 		      list_empty(&dev_priv->mm.active_list); | ||||
| 
 | ||||
| 	return !lists_empty; | ||||
| 	return !list_empty(&dev_priv->mm.active_list); | ||||
| } | ||||
| 
 | ||||
| static int | ||||
|  | ||||
| @ -93,23 +93,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, | ||||
| 
 | ||||
| 	/* Now merge in the soon-to-be-expired objects... */ | ||||
| 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | ||||
| 		/* Does the object require an outstanding flush? */ | ||||
| 		if (obj->base.write_domain) | ||||
| 			continue; | ||||
| 
 | ||||
| 		if (mark_free(obj, &unwind_list)) | ||||
| 			goto found; | ||||
| 	} | ||||
| 
 | ||||
| 	/* Finally add anything with a pending flush (in order of retirement) */ | ||||
| 	list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { | ||||
| 		if (mark_free(obj, &unwind_list)) | ||||
| 			goto found; | ||||
| 	} | ||||
| 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | ||||
| 		if (!obj->base.write_domain) | ||||
| 			continue; | ||||
| 
 | ||||
| 		if (mark_free(obj, &unwind_list)) | ||||
| 			goto found; | ||||
| 	} | ||||
| @ -172,7 +155,6 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) | ||||
| 	int ret; | ||||
| 
 | ||||
| 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | ||||
| 		       list_empty(&dev_priv->mm.flushing_list) && | ||||
| 		       list_empty(&dev_priv->mm.active_list)); | ||||
| 	if (lists_empty) | ||||
| 		return -ENOSPC; | ||||
| @ -189,8 +171,6 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) | ||||
| 
 | ||||
| 	i915_gem_retire_requests(dev); | ||||
| 
 | ||||
| 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||||
| 
 | ||||
| 	/* Having flushed everything, unbind() should never raise an error */ | ||||
| 	list_for_each_entry_safe(obj, next, | ||||
| 				 &dev_priv->mm.inactive_list, mm_list) { | ||||
|  | ||||
		Loading…
	
		Reference in New Issue
	
	Block a user