Merge tag 'drm-intel-next-fixes-2016-12-07' of git://anongit.freedesktop.org/git/drm-intel into drm-next
First set of fixes for -next.
* tag 'drm-intel-next-fixes-2016-12-07' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: Move priority bumping for flips earlier
  drm/i915: Hold a reference on the request for its fence chain
  drm/i915/audio: fix hdmi audio noise issue
  drm/i915/debugfs: Increment return value of gt.next_seqno
  drm/i915/debugfs: Drop i915_hws_info
  drm/i915: Initialize dev_priv->atomic_cdclk_freq at init time
  drm/i915: Fix cdclk vs. dev_cdclk mess when not recomputing things
  drm/i915: Make skl_write_{plane,cursor}_wm() static
  drm/i915: Complete requests in nop_submit_request
  drm/i915/gvt: fix lock not released bug for dispatch_workload() err path
  drm/i915/gvt: fix getting 64bit bar size error
  drm/i915/gvt: fix missing init param.primary
@@ -361,6 +361,8 @@ static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
 		 * leave the bit 3 - bit 0 unchanged.
 		 */
 		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
+	} else {
+		*pval = val;
 	}
 }

@@ -177,8 +177,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
 		gvt_err("fail to allocate gem request\n");
-		workload->status = PTR_ERR(rq);
-		return workload->status;
+		ret = PTR_ERR(rq);
+		goto out;
 	}
 
 	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
@@ -212,7 +212,8 @@ out:
 	if (ret)
 		workload->status = ret;
 
-	i915_add_request_no_flush(rq);
+	if (!IS_ERR_OR_NULL(rq))
+		i915_add_request_no_flush(rq);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	return ret;
 }
@@ -460,7 +461,8 @@ complete:
 
 		complete_current_workload(gvt, ring_id);
 
-		i915_gem_request_put(fetch_and_zero(&workload->req));
+		if (workload->req)
+			i915_gem_request_put(fetch_and_zero(&workload->req));
 
 		if (need_force_wake)
 			intel_uncore_forcewake_put(gvt->dev_priv,

@@ -378,6 +378,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	struct intel_vgpu *vgpu;
 
 	param.handle = 0;
+	param.primary = 1;
 	param.low_gm_sz = type->low_gm_size;
 	param.high_gm_sz = type->high_gm_size;
 	param.fence_sz = type->fence;

@@ -935,27 +935,6 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static int i915_hws_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_i915_private *dev_priv = node_to_i915(node);
-	struct intel_engine_cs *engine;
-	const u32 *hws;
-	int i;
-
-	engine = dev_priv->engine[(uintptr_t)node->info_ent->data];
-	hws = engine->status_page.page_addr;
-	if (hws == NULL)
-		return 0;
-
-	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
-		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
-			   i * 4,
-			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
-	}
-	return 0;
-}
-
 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
 
 static ssize_t
@@ -1047,7 +1026,7 @@ i915_next_seqno_get(void *data, u64 *val)
 {
 	struct drm_i915_private *dev_priv = data;
 
-	*val = atomic_read(&dev_priv->gt.global_timeline.next_seqno);
+	*val = 1 + atomic_read(&dev_priv->gt.global_timeline.next_seqno);
 	return 0;
 }
 
@@ -5403,10 +5382,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
 	{"i915_gem_interrupt", i915_interrupt_info, 0},
-	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
-	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
-	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
-	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
 	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
 	{"i915_guc_info", i915_guc_info, 0},
 	{"i915_guc_load_status", i915_guc_load_status_info, 0},

@@ -2764,6 +2764,8 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
 
 static void nop_submit_request(struct drm_i915_gem_request *request)
 {
+	i915_gem_request_submit(request);
+	intel_engine_init_global_seqno(request->engine, request->global_seqno);
 }
 
 static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)

@@ -200,8 +200,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	struct i915_gem_active *active, *next;
 
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
-	GEM_BUG_ON(!i915_sw_fence_done(&request->submit));
-	GEM_BUG_ON(!i915_sw_fence_done(&request->execute));
+	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
+	GEM_BUG_ON(!i915_sw_fence_signaled(&request->execute));
 	GEM_BUG_ON(!i915_gem_request_completed(request));
 	GEM_BUG_ON(!request->i915->gt.active_requests);
 
@@ -445,11 +445,17 @@ void i915_gem_request_submit(struct drm_i915_gem_request *request)
 static int __i915_sw_fence_call
 submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 {
-	if (state == FENCE_COMPLETE) {
-		struct drm_i915_gem_request *request =
-			container_of(fence, typeof(*request), submit);
+	struct drm_i915_gem_request *request =
+		container_of(fence, typeof(*request), submit);
 
+	switch (state) {
+	case FENCE_COMPLETE:
 		request->engine->submit_request(request);
+		break;
+
+	case FENCE_FREE:
+		i915_gem_request_put(request);
+		break;
 	}
 
 	return NOTIFY_DONE;
@@ -458,6 +464,18 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 static int __i915_sw_fence_call
 execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 {
+	struct drm_i915_gem_request *request =
+		container_of(fence, typeof(*request), execute);
+
+	switch (state) {
+	case FENCE_COMPLETE:
+		break;
+
+	case FENCE_FREE:
+		i915_gem_request_put(request);
+		break;
+	}
+
 	return NOTIFY_DONE;
 }
 
@@ -545,8 +563,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 		       req->timeline->fence_context,
 		       __timeline_get_seqno(req->timeline->common));
 
-	i915_sw_fence_init(&req->submit, submit_notify);
-	i915_sw_fence_init(&req->execute, execute_notify);
+	/* We bump the ref for the fence chain */
+	i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
+	i915_sw_fence_init(&i915_gem_request_get(req)->execute, execute_notify);
 
 	/* Ensure that the execute fence completes after the submit fence -
 	 * as we complete the execute fence from within the submit fence
 	 * callback, its completion would otherwise be visible first.

@@ -75,6 +75,11 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 				    unsigned long timeout,
 				    gfp_t gfp);
 
+static inline bool i915_sw_fence_signaled(const struct i915_sw_fence *fence)
+{
+	return atomic_read(&fence->pending) <= 0;
+}
+
 static inline bool i915_sw_fence_done(const struct i915_sw_fence *fence)
 {
 	return atomic_read(&fence->pending) < 0;

@@ -351,10 +351,13 @@ hsw_hdmi_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
 
 	I915_WRITE(HSW_AUD_CFG(pipe), tmp);
 
+	/*
+	 * Let's disable "Enable CTS or M Prog bit"
+	 * and let HW calculate the value
+	 */
 	tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(pipe));
-	tmp &= ~AUD_CONFIG_M_MASK;
+	tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
 	tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
-	tmp |= AUD_M_CTS_M_PROG_ENABLE;
 	I915_WRITE(HSW_AUD_M_CTS_ENABLE(pipe), tmp);
 }

@@ -12028,7 +12028,6 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
 		to_intel_framebuffer(crtc->base.primary->fb);
 	struct drm_i915_gem_object *obj = intel_fb->obj;
 
-	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
 	WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);
 
 	intel_pipe_update_start(crtc);
@@ -12284,6 +12283,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		i915_add_request_no_flush(request);
 	}
 
+	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
 	i915_gem_track_fb(intel_fb_obj(old_fb), obj,
 			  to_intel_plane(primary)->frontbuffer_bit);
 	mutex_unlock(&dev->struct_mutex);
@@ -13995,8 +13995,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
 
 		DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
 			      intel_state->cdclk, intel_state->dev_cdclk);
-	} else
+	} else {
 		to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
+	}
 
 	intel_modeset_clear_plls(state);
 
@@ -14097,8 +14098,9 @@ static int intel_atomic_check(struct drm_device *dev,
 
 		if (ret)
 			return ret;
-	} else
-		intel_state->cdclk = dev_priv->cdclk_freq;
+	} else {
+		intel_state->cdclk = dev_priv->atomic_cdclk_freq;
+	}
 
 	ret = drm_atomic_helper_check_planes(dev, state);
 	if (ret)
@@ -16485,6 +16487,7 @@ int intel_modeset_init(struct drm_device *dev)
 
 	intel_update_czclk(dev_priv);
 	intel_update_cdclk(dev_priv);
+	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
 
 	intel_shared_dpll_init(dev);

@@ -3851,10 +3851,10 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
 	I915_WRITE(reg, val);
 }
 
-void skl_write_plane_wm(struct intel_crtc *intel_crtc,
-			const struct skl_plane_wm *wm,
-			const struct skl_ddb_allocation *ddb,
-			int plane)
+static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
+			       const struct skl_plane_wm *wm,
+			       const struct skl_ddb_allocation *ddb,
+			       int plane)
 {
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct drm_device *dev = crtc->dev;
@@ -3875,9 +3875,9 @@ void skl_write_plane_wm(struct intel_crtc *intel_crtc,
 			    &ddb->y_plane[pipe][plane]);
 }
 
-void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
-			 const struct skl_plane_wm *wm,
-			 const struct skl_ddb_allocation *ddb)
+static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
+				const struct skl_plane_wm *wm,
+				const struct skl_ddb_allocation *ddb)
 {
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct drm_device *dev = crtc->dev;