i915, amdgpu and nouveau fixes
-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJZ+/SnAAoJEAx081l5xIa+mv4QAKxWFXqIuhIrE1K8iSkM43Tc
9xbUbsmE0HKDn1xJmklP3upGdUzv9BBrEN+tPW6t+BfJ+swVBKdtx1lZc/k9mQ9m
iLpWlefS/zc71Jpw4wGQ6Ky5gNrYkEH57D6rZs9gHYZ4TzoVGJhOPG9EGzuokU9i
Cl0MXmAzr7XVCydtcmbX6NvwG4h+UmMxOCbeOjev8mJSNes/ssc8iUpE/Dx0qO5d
nUK23NipTKG1xuaPDVGqC3VvXvhur9l5V/YiAYbAckAt85d2VjX6b4VqjagnlL1Z
DhwNuWXwfqdzddec1j5ME/OPVw4npdeMh3mgZgMgXKzJjTwxQk3Uw5Qg0gqf5OIz
xNv1BGJ8hV93wgAicfSRA1mVqpFl16hOzK5sNXJM4vsTuVS90y32Mkow17kXNz2m
sD0vm3joldO3fe0DQy5huAUMmiQDl9dVcsXIM/xRt2GdGd7dHdwc0ODSI241pecr
SgpePYAsp/ISR2tW4+9u8+ol4GN06gXXX9KxZW59XwglqQM2sHC4UGPXQt0Fl/f2
xh7PxE4er9qmDcc7/VyAMpeeUCLguEeFFDz5X5A2pd08cgZR++1+TOj9HBuSh1Ko
se3zakshB8zdWBdSIEFCOnBV6kJR59Vs3a6F6XQ8jqubzPp/Dv51cvz2Y59smRgV
A/30msWIALKTm2kV32/h
=s45z
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-for-v4.14-rc8' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:

 - one nouveau regression fix

 - some amdgpu fixes for stable to fix hangs on some harvested Polaris
   GPUs

 - a set of KASAN and regression fixes for i915; their CI system seems
   to be working pretty well now

* tag 'drm-fixes-for-v4.14-rc8' of git://people.freedesktop.org/~airlied/linux:
  drm/amdgpu: allow harvesting check for Polaris VCE
  drm/amdgpu: return -ENOENT from uvd 6.0 early init for harvesting
  drm/i915: Check incoming alignment for unfenced buffers (on i915gm)
  drm/nouveau/kms/nv50: use the correct state for base channel notifier setup
  drm/i915: Hold rcu_read_lock when iterating over the radixtree (vma idr)
  drm/i915: Hold rcu_read_lock when iterating over the radixtree (objects)
  drm/i915/edp: read edp display control registers unconditionally
  drm/i915: Do not rely on wm preservation for ILK watermarks
  drm/i915: Cancel the modeset retry work during modeset cleanup
This commit is contained in commit e65a139d5b.
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -93,6 +93,10 @@ static int uvd_v6_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (!(adev->flags & AMD_IS_APU) &&
+	    (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
+		return -ENOENT;
+
 	uvd_v6_0_set_ring_funcs(adev);
 	uvd_v6_0_set_irq_funcs(adev);
 
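Why -ENOENT: amdgpu's IP-block walk treats -ENOENT from an early_init hook as "this block does not exist on this chip" and marks the block invalid instead of failing the whole device init. A minimal sketch of that caller-side convention (shape assumed from amdgpu_device.c, not a verbatim copy):

	/* Sketch: how the IP-block walk consumes -ENOENT (assumed shape). */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
		if (r == -ENOENT) {
			/* Harvested/absent block: skip it, keep initializing. */
			adev->ip_blocks[i].status.valid = false;
		} else if (r) {
			return r;	/* real failure: abort device init */
		} else {
			adev->ip_blocks[i].status.valid = true;
		}
	}

So a UVD block whose harvest fuse is blown is silently skipped rather than taking down the probe of an otherwise working GPU.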
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -365,15 +365,10 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
 {
 	u32 tmp;
 
-	/* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */
 	if ((adev->asic_type == CHIP_FIJI) ||
-	    (adev->asic_type == CHIP_STONEY) ||
-	    (adev->asic_type == CHIP_POLARIS10) ||
-	    (adev->asic_type == CHIP_POLARIS11) ||
-	    (adev->asic_type == CHIP_POLARIS12))
+	    (adev->asic_type == CHIP_STONEY))
 		return AMDGPU_VCE_HARVEST_VCE1;
 
 	/* Tonga and CZ are dual or single pipe */
 	if (adev->flags & AMD_IS_APU)
 		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
 		       VCE_HARVEST_FUSE_MACRO__MASK) >>
@@ -391,6 +386,11 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
 	case 3:
 		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
 	default:
+		if ((adev->asic_type == CHIP_POLARIS10) ||
+		    (adev->asic_type == CHIP_POLARIS11) ||
+		    (adev->asic_type == CHIP_POLARIS12))
+			return AMDGPU_VCE_HARVEST_VCE1;
+
 		return 0;
 	}
 }
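For context, tmp here holds a two-bit harvest fuse field and the switch decodes it. A hedged reconstruction of the full decode (case labels taken from vce_v3_0.c; treat this as a sketch, not verbatim):

	switch (tmp) {
	case 1:
		return AMDGPU_VCE_HARVEST_VCE0;
	case 2:
		return AMDGPU_VCE_HARVEST_VCE1;
	case 3:
		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
	default:
		/* Polaris is single pipe, so VCE1 is still reported as
		 * harvested even when the fuses read zero. */
		if ((adev->asic_type == CHIP_POLARIS10) ||
		    (adev->asic_type == CHIP_POLARIS11) ||
		    (adev->asic_type == CHIP_POLARIS12))
			return AMDGPU_VCE_HARVEST_VCE1;
		return 0;
	}

Before this fix, Polaris returned AMDGPU_VCE_HARVEST_VCE1 unconditionally and never reached the fuse check, so parts with VCE harvested entirely still tried to bring the block up and hung.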
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2214,8 +2214,10 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 	struct radix_tree_iter iter;
 	void __rcu **slot;
 
+	rcu_read_lock();
 	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
 		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+	rcu_read_unlock();
 }
 
 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -104,6 +104,7 @@ static void lut_close(struct i915_gem_context *ctx)
 		kmem_cache_free(ctx->i915->luts, lut);
 	}
 
+	rcu_read_lock();
 	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
 		struct i915_vma *vma = rcu_dereference_raw(*slot);
 		struct drm_i915_gem_object *obj = vma->obj;
@@ -115,6 +116,7 @@ static void lut_close(struct i915_gem_context *ctx)
 
 		__i915_gem_object_release_unless_active(obj);
 	}
+	rcu_read_unlock();
 }
 
 static void i915_gem_context_free(struct i915_gem_context *ctx)
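Both radixtree fixes apply the same rule: a radix tree frees its interior nodes via RCU, so any lockless walk with radix_tree_for_each_slot() must sit inside an RCU read-side critical section, even when the caller otherwise serializes modifications; KASAN flagged the unlocked i915 iterations. A minimal generic sketch of the idiom (the tree here is illustrative, not i915's):

	#include <linux/radix-tree.h>
	#include <linux/rcupdate.h>

	static unsigned long count_entries(struct radix_tree_root *root)
	{
		struct radix_tree_iter iter;
		void __rcu **slot;
		unsigned long n = 0;

		rcu_read_lock();	/* keeps interior nodes alive during the walk */
		radix_tree_for_each_slot(slot, root, &iter, 0)
			n++;
		rcu_read_unlock();

		return n;
	}

The read lock protects the tree structure for the duration of the walk; it does not replace whatever locking the entries themselves require.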
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -337,6 +337,10 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
 	    (vma->node.start + vma->node.size - 1) >> 32)
 		return true;
 
+	if (flags & __EXEC_OBJECT_NEEDS_MAP &&
+	    !i915_vma_is_map_and_fenceable(vma))
+		return true;
+
 	return false;
 }
 
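The new predicate says: an object flagged __EXEC_OBJECT_NEEDS_MAP must end up in a binding that is both GTT-mappable and fenceable, and an existing binding that is not gets reported as misplaced. A simplified shape of how the reservation path consumes that verdict (assumed flow, not verbatim i915 code):

	/* Sketch: a misplaced vma is dropped and rebound at a valid offset. */
	if (eb_vma_misplaced(entry, vma, flags)) {
		err = i915_vma_unbind(vma);
		if (err)
			return err;
		/* the rebind then goes through i915_vma_pin() with the
		 * required alignment and mappable constraints */
	}

Without the check, a stale binding at an unfenceable offset could be reused as-is on i915g/i915gm.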
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15227,6 +15227,23 @@
 	intel_panel_destroy_backlight(connector);
 }
 
+static void intel_hpd_poll_fini(struct drm_device *dev)
+{
+	struct intel_connector *connector;
+	struct drm_connector_list_iter conn_iter;
+
+	/* First disable polling... */
+	drm_kms_helper_poll_fini(dev);
+
+	/* Then kill the work that may have been queued by hpd. */
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	for_each_intel_connector_iter(connector, &conn_iter) {
+		if (connector->modeset_retry_work.func)
+			cancel_work_sync(&connector->modeset_retry_work);
+	}
+	drm_connector_list_iter_end(&conn_iter);
+}
+
 void intel_modeset_cleanup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -15247,7 +15264,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	 * Due to the hpd irq storm handling the hotplug work can re-arm the
 	 * poll handlers. Hence disable polling after hpd handling is shut down.
 	 */
-	drm_kms_helper_poll_fini(dev);
+	intel_hpd_poll_fini(dev);
 
 	/* poll work can call into fbdev, hence clean that up afterwards */
 	intel_fbdev_fini(dev_priv);
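intel_hpd_poll_fini() codifies an ordering requirement: output polling can queue a connector's modeset retry work, so teardown must first remove the source of new work and only then flush whatever is already queued. The same shape applies to any self-re-arming work item (names here are hypothetical, not i915 API):

	/* Sketch: tearing down work that can be re-queued by another source. */
	static void mydev_fini(struct mydev *md)
	{
		/* 1) Stop the mechanism that can queue new work. */
		mydev_disable_polling(md);	/* hypothetical helper */

		/* 2) Nothing can re-queue it now; flush any straggler. */
		cancel_work_sync(&md->retry_work);
	}

Running the two steps in the opposite order leaves a window in which the work is queued again after cancel_work_sync() returns and then runs against freed state, which is what this fix prevents.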
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3731,9 +3731,16 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
 
 	}
 
-	/* Read the eDP Display control capabilities registers */
-	if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
-	    drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+	/*
+	 * Read the eDP display control registers.
+	 *
+	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
+	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
+	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
+	 * method). The display control registers should read zero if they're
+	 * not supported anyway.
+	 */
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
 			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
 			     sizeof(intel_dp->edp_dpcd))
 		DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
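drm_dp_dpcd_read() returns the number of bytes transferred (or a negative errno), hence the "== sizeof(...)" success test. A minimal hedged sketch of the same pattern (parse_edp_caps() is a hypothetical stand-in):

	u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];	/* block starting at DP_EDP_DPCD_REV */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     edp_dpcd, sizeof(edp_dpcd)) == sizeof(edp_dpcd))
		parse_edp_caps(edp_dpcd);	/* hypothetical helper */

Reading unconditionally is safe because, per the comment above, DPCD registers a sink does not implement read back as zero, so a panel without display control support simply yields an all-zero capability block.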
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -496,7 +496,6 @@ struct intel_crtc_scaler_state {
 
 struct intel_pipe_wm {
 	struct intel_wm_level wm[5];
-	struct intel_wm_level raw_wm[5];
 	uint32_t linetime;
 	bool fbc_wm_enabled;
 	bool pipe_enabled;
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2716,9 +2716,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
 				 const struct intel_crtc *intel_crtc,
 				 int level,
 				 struct intel_crtc_state *cstate,
-				 struct intel_plane_state *pristate,
-				 struct intel_plane_state *sprstate,
-				 struct intel_plane_state *curstate,
+				 const struct intel_plane_state *pristate,
+				 const struct intel_plane_state *sprstate,
+				 const struct intel_plane_state *curstate,
 				 struct intel_wm_level *result)
 {
 	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -3038,28 +3038,24 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 	struct intel_pipe_wm *pipe_wm;
 	struct drm_device *dev = state->dev;
 	const struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_plane *intel_plane;
-	struct intel_plane_state *pristate = NULL;
-	struct intel_plane_state *sprstate = NULL;
-	struct intel_plane_state *curstate = NULL;
+	struct drm_plane *plane;
+	const struct drm_plane_state *plane_state;
+	const struct intel_plane_state *pristate = NULL;
+	const struct intel_plane_state *sprstate = NULL;
+	const struct intel_plane_state *curstate = NULL;
 	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
 	struct ilk_wm_maximums max;
 
 	pipe_wm = &cstate->wm.ilk.optimal;
 
-	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
-		struct intel_plane_state *ps;
+	drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
+		const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
 
-		ps = intel_atomic_get_existing_plane_state(state,
-							   intel_plane);
-		if (!ps)
-			continue;
-
-		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
 			pristate = ps;
-		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
+		else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
 			sprstate = ps;
-		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
+		else if (plane->type == DRM_PLANE_TYPE_CURSOR)
 			curstate = ps;
 	}
 
@@ -3081,11 +3077,9 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 	if (pipe_wm->sprites_scaled)
 		usable_level = 0;
 
-	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
-			     pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
-
 	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
-	pipe_wm->wm[0] = pipe_wm->raw_wm[0];
+	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
+			     pristate, sprstate, curstate, &pipe_wm->wm[0]);
 
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
@@ -3095,8 +3089,8 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 
 	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
 
-	for (level = 1; level <= max_level; level++) {
-		struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
+	for (level = 1; level <= usable_level; level++) {
+		struct intel_wm_level *wm = &pipe_wm->wm[level];
 
 		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
 				     pristate, sprstate, curstate, wm);
@@ -3106,13 +3100,10 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 		 * register maximums since such watermarks are
 		 * always invalid.
 		 */
-		if (level > usable_level)
-			continue;
-
-		if (ilk_validate_wm_level(level, &max, wm))
-			pipe_wm->wm[level] = *wm;
-		else
-			usable_level = level;
+		if (!ilk_validate_wm_level(level, &max, wm)) {
+			memset(wm, 0, sizeof(*wm));
+			break;
+		}
 	}
 
 	return 0;
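The watermark rework removes the raw_wm[] staging array. The old flow computed every level into raw_wm[], copied only the validated ones into wm[], and left higher wm[] entries untouched, implicitly relying on values preserved from an earlier computation; that preservation is exactly what could go stale. The new flow is equivalent to this simplified shape (compute_level()/validate_level() are stand-ins for the ilk_* helpers):

	/* Sketch: fill a zeroed array bottom-up and stop at the first invalid
	 * level; ILK watermark levels must form a valid prefix, so anything
	 * above an invalid level is unusable anyway. */
	memset(wm, 0, sizeof(*wm) * num_levels);
	compute_level(0, &wm[0]);		/* level 0 must always be usable */
	for (level = 1; level <= usable_level; level++) {
		compute_level(level, &wm[level]);
		if (!validate_level(level, &wm[level])) {
			memset(&wm[level], 0, sizeof(wm[level]));
			break;
		}
	}

Everything past the break stays zeroed, i.e. disabled, instead of inheriting stale data from a previous atomic state.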
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -4099,7 +4099,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv50_disp *disp = nv50_disp(dev);
-	struct drm_plane_state *old_plane_state;
+	struct drm_plane_state *new_plane_state;
 	struct drm_plane *plane;
 	struct drm_crtc *crtc;
 	bool active = false;
@@ -4129,8 +4129,8 @@ nv50_disp_atomic_commit(struct drm_device *dev,
 	if (ret)
 		goto err_cleanup;
 
-	for_each_old_plane_in_state(state, plane, old_plane_state, i) {
-		struct nv50_wndw_atom *asyw = nv50_wndw_atom(old_plane_state);
+	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
 		struct nv50_wndw *wndw = nv50_wndw(plane);
 
 		if (asyw->set.image) {
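An atomic commit carries two copies of every object's state: the old state (what the hardware is currently showing) and the new state (what this commit programs). The notifier setup in this loop acts on the configuration about to be displayed, so it must iterate new state; walking old state read stale asyw->set flags. A hedged sketch of the accessors (macro names are the 4.14-era drm_atomic.h helpers):

	struct drm_plane *plane;
	struct drm_plane_state *old_state, *new_state;
	int i;

	for_each_oldnew_plane_in_state(state, plane, old_state, new_state, i) {
		/* old_state: pre-commit configuration, useful for teardown;
		 * new_state: configuration being programmed, use for setup. */
	}

for_each_old_plane_in_state() and for_each_new_plane_in_state() are the single-sided variants; this fix switches the setup loop from the former to the latter.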