Merge branch 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel into drm-fixes
Daniel writes:
"- some register magic to fix hsw crw (Paulo&Ben)
 - fix backlight destruction for cpu edp (Jani)
 - fix gen ch7xxx dvo ->get_hw_state
 - fixup the plane->pipe fixup code, the broken version massively angers the modeset sanity checks
 - kill pipe A quirk for i855gm, otherwise I get a black screen with the above patch
 - fixup for gem_get_page helper (Chris)
 - fixup guardband clipping w/a (Ken), without this mesa master can erronously drop vertices on snb, mesa 9.0 has the optimization reverted
 - another pageflip vs. modeset fix
 - kill bogus BUG_ON which broke ums+gem from Willy Tarreau (gasp, people are still using this!)"

* 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel:
  drm/i915: fix non-DP-D eDP backlight cleanup and module reload
  drm/i915: HSW CRW stability magic
  drm/i915/dvo-ch7xxx: fix get_hw_state
  drm/i915: fixup the plane->pipe fixup code
  drm/i915: rip out the pipe A quirk for i855gm
  drm/i915: disable wc gtt pte mappings on gen2
  drm/i915: fixup i915_gem_object_get_page inline helper
  drm/i915: Disallow preallocation of requests
  drm/i915: Set guardband clipping workaround bit in the right register.
  drm/i915: paper over a pipe-enable vs pageflip race
  drm/i915: remove useless BUG_ON which caused a regression in 3.5.
@@ -667,7 +667,7 @@ static int intel_gtt_init(void)
         gtt_map_size = intel_private.base.gtt_total_entries * 4;
 
         intel_private.gtt = NULL;
-        if (INTEL_GTT_GEN < 6)
+        if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
                 intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
                                                gtt_map_size);
         if (intel_private.gtt == NULL)
@@ -303,10 +303,10 @@ static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
 
         ch7xxx_readb(dvo, CH7xxx_PM, &val);
 
-        if (val & CH7xxx_PM_FPD)
-                return false;
-        else
+        if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP))
                 return true;
+        else
+                return false;
 }
 
 static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
@@ -1341,9 +1341,14 @@ int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
         struct scatterlist *sg = obj->pages->sgl;
-        while (n >= SG_MAX_SINGLE_ALLOC) {
+        int nents = obj->pages->nents;
+        while (nents > SG_MAX_SINGLE_ALLOC) {
+                if (n < SG_MAX_SINGLE_ALLOC - 1)
+                        break;
+
                 sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
                 n -= SG_MAX_SINGLE_ALLOC - 1;
+                nents -= SG_MAX_SINGLE_ALLOC - 1;
         }
         return sg_page(sg+n);
 }
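A minimal caller-side sketch of the fixed helper (illustrative only, not part of the patch), assuming obj->pages has already been populated via i915_gem_object_get_pages(); the function name example_page_for_offset is hypothetical:

/* Sketch: map a byte offset inside a GEM object to its backing page. The
 * fixed helper above only follows a scatterlist chain pointer while the
 * table still holds more than SG_MAX_SINGLE_ALLOC entries and the wanted
 * index lies beyond the current chunk. */
static struct page *example_page_for_offset(struct drm_i915_gem_object *obj,
                                            unsigned long offset)
{
        int n = offset >> PAGE_SHIFT;   /* page index within the object */

        return i915_gem_object_get_page(obj, n);
}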
@@ -1427,7 +1432,7 @@ int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
 int i915_add_request(struct intel_ring_buffer *ring,
                      struct drm_file *file,
-                     struct drm_i915_gem_request *request);
+                     u32 *seqno);
 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
                                  uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
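With the prototype change above, callers of i915_add_request() no longer preallocate a struct drm_i915_gem_request; they optionally receive the new request's seqno through a u32 pointer. A hedged sketch of the new calling convention, modeled on the overlay hunks further down (example_emit_and_wait is a hypothetical helper, not code from this series):

/* Sketch: queue a request on @ring and wait for it. The request struct is
 * now allocated inside i915_add_request(); passing NULL for the drm_file
 * means no client is notified of completion. */
static int example_emit_and_wait(struct intel_ring_buffer *ring)
{
        u32 seqno;
        int ret;

        ret = i915_add_request(ring, NULL, &seqno);
        if (ret)
                return ret;

        return i915_wait_seqno(ring, seqno);
}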
@@ -1955,11 +1955,12 @@ i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
 int
 i915_add_request(struct intel_ring_buffer *ring,
                  struct drm_file *file,
-                 struct drm_i915_gem_request *request)
+                 u32 *out_seqno)
 {
         drm_i915_private_t *dev_priv = ring->dev->dev_private;
-        uint32_t seqno;
+        struct drm_i915_gem_request *request;
         u32 request_ring_position;
+        u32 seqno;
         int was_empty;
         int ret;
 
@@ -1974,11 +1975,9 @@ i915_add_request(struct intel_ring_buffer *ring,
         if (ret)
                 return ret;
 
-        if (request == NULL) {
-                request = kmalloc(sizeof(*request), GFP_KERNEL);
-                if (request == NULL)
-                        return -ENOMEM;
-        }
+        request = kmalloc(sizeof(*request), GFP_KERNEL);
+        if (request == NULL)
+                return -ENOMEM;
 
         seqno = i915_gem_next_request_seqno(ring);
 
@@ -2030,6 +2029,8 @@ i915_add_request(struct intel_ring_buffer *ring,
                 }
         }
 
+        if (out_seqno)
+                *out_seqno = seqno;
         return 0;
 }
 
@@ -3959,6 +3960,9 @@ i915_gem_init_hw(struct drm_device *dev)
         if (!intel_enable_gtt())
                 return -EIO;
 
+        if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
+                I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
+
         i915_gem_l3_remap(dev);
 
         i915_gem_init_swizzling(dev);
@@ -4098,7 +4102,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
         }
 
         BUG_ON(!list_empty(&dev_priv->mm.active_list));
-        BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
         mutex_unlock(&dev->struct_mutex);
 
         ret = drm_irq_install(dev);
@@ -521,7 +521,7 @@
  */
 # define _3D_CHICKEN2_WM_READ_PIPELINED         (1 << 14)
 #define _3D_CHICKEN3            0x02090
-#define _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL    (1 << 5)
+#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL   (1 << 5)
 
 #define MI_MODE                 0x0209c
 # define VS_TIMER_DISPATCH                      (1 << 6)
@@ -3253,6 +3253,16 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 
         if (HAS_PCH_CPT(dev))
                 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+
+        /*
+         * There seems to be a race in PCH platform hw (at least on some
+         * outputs) where an enabled pipe still completes any pageflip right
+         * away (as if the pipe is off) instead of waiting for vblank. As soon
+         * as the first vblank happend, everything works as expected. Hence just
+         * wait for one vblank before returning to avoid strange things
+         * happening.
+         */
+        intel_wait_for_vblank(dev, intel_crtc->pipe);
 }
 
 static void ironlake_crtc_disable(struct drm_crtc *crtc)
@@ -7892,8 +7902,7 @@ static struct intel_quirk intel_quirks[] = {
         /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
         { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
 
-        /* 855 & before need to leave pipe A & dpll A up */
-        { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+        /* 830/845 need to leave pipe A & dpll A up */
         { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
         { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
 
@@ -8049,29 +8058,42 @@ static void intel_enable_pipe_a(struct drm_device *dev)
 
 }
 
+static bool
+intel_check_plane_mapping(struct intel_crtc *crtc)
+{
+        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+        u32 reg, val;
+
+        if (dev_priv->num_pipe == 1)
+                return true;
+
+        reg = DSPCNTR(!crtc->plane);
+        val = I915_READ(reg);
+
+        if ((val & DISPLAY_PLANE_ENABLE) &&
+            (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
+                return false;
+
+        return true;
+}
+
 static void intel_sanitize_crtc(struct intel_crtc *crtc)
 {
         struct drm_device *dev = crtc->base.dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
-        u32 reg, val;
+        u32 reg;
 
         /* Clear any frame start delays used for debugging left by the BIOS */
         reg = PIPECONF(crtc->pipe);
         I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
 
         /* We need to sanitize the plane -> pipe mapping first because this will
-         * disable the crtc (and hence change the state) if it is wrong. */
-        if (!HAS_PCH_SPLIT(dev)) {
+         * disable the crtc (and hence change the state) if it is wrong. Note
+         * that gen4+ has a fixed plane -> pipe mapping. */
+        if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
                 struct intel_connector *connector;
                 bool plane;
 
-                reg = DSPCNTR(crtc->plane);
-                val = I915_READ(reg);
-
-                if ((val & DISPLAY_PLANE_ENABLE) == 0 &&
-                    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
-                        goto ok;
-
                 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
                               crtc->base.base.id);
 
@@ -8095,7 +8117,6 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
                 WARN_ON(crtc->active);
                 crtc->base.enabled = false;
         }
-ok:
 
         if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
             crtc->pipe == PIPE_A && !crtc->active) {
@@ -2369,8 +2369,9 @@ static void
 intel_dp_destroy(struct drm_connector *connector)
 {
         struct drm_device *dev = connector->dev;
+        struct intel_dp *intel_dp = intel_attached_dp(connector);
 
-        if (intel_dpd_is_edp(dev))
+        if (is_edp(intel_dp))
                 intel_panel_destroy_backlight(dev);
 
         drm_sysfs_connector_remove(connector);
@@ -209,7 +209,6 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
 }
 
 static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
-                                         struct drm_i915_gem_request *request,
                                          void (*tail)(struct intel_overlay *))
 {
         struct drm_device *dev = overlay->dev;
@@ -218,12 +217,10 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
         int ret;
 
         BUG_ON(overlay->last_flip_req);
-        ret = i915_add_request(ring, NULL, request);
-        if (ret) {
-                kfree(request);
+        ret = i915_add_request(ring, NULL, &overlay->last_flip_req);
+        if (ret)
                 return ret;
-        }
-        overlay->last_flip_req = request->seqno;
+
         overlay->flip_tail = tail;
         ret = i915_wait_seqno(ring, overlay->last_flip_req);
         if (ret)
@@ -240,7 +237,6 @@ static int intel_overlay_on(struct intel_overlay *overlay)
         struct drm_device *dev = overlay->dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
-        struct drm_i915_gem_request *request;
         int ret;
 
         BUG_ON(overlay->active);
@@ -248,17 +244,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
         WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
-        request = kzalloc(sizeof(*request), GFP_KERNEL);
-        if (request == NULL) {
-                ret = -ENOMEM;
-                goto out;
-        }
-
         ret = intel_ring_begin(ring, 4);
-        if (ret) {
-                kfree(request);
-                goto out;
-        }
+        if (ret)
+                return ret;
 
         intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
         intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
@@ -266,9 +254,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
         intel_ring_emit(ring, MI_NOOP);
         intel_ring_advance(ring);
 
-        ret = intel_overlay_do_wait_request(overlay, request, NULL);
-out:
-        return ret;
+        return intel_overlay_do_wait_request(overlay, NULL);
 }
 
 /* overlay needs to be enabled in OCMD reg */
@@ -278,17 +264,12 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
         struct drm_device *dev = overlay->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
-        struct drm_i915_gem_request *request;
         u32 flip_addr = overlay->flip_addr;
         u32 tmp;
         int ret;
 
         BUG_ON(!overlay->active);
 
-        request = kzalloc(sizeof(*request), GFP_KERNEL);
-        if (request == NULL)
-                return -ENOMEM;
-
         if (load_polyphase_filter)
                 flip_addr |= OFC_UPDATE;
 
@@ -298,22 +279,14 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
         DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
         ret = intel_ring_begin(ring, 2);
-        if (ret) {
-                kfree(request);
+        if (ret)
                 return ret;
-        }
+
         intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
         intel_ring_emit(ring, flip_addr);
         intel_ring_advance(ring);
 
-        ret = i915_add_request(ring, NULL, request);
-        if (ret) {
-                kfree(request);
-                return ret;
-        }
-
-        overlay->last_flip_req = request->seqno;
-        return 0;
+        return i915_add_request(ring, NULL, &overlay->last_flip_req);
 }
 
 static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -349,15 +322,10 @@ static int intel_overlay_off(struct intel_overlay *overlay)
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
         u32 flip_addr = overlay->flip_addr;
-        struct drm_i915_gem_request *request;
         int ret;
 
         BUG_ON(!overlay->active);
 
-        request = kzalloc(sizeof(*request), GFP_KERNEL);
-        if (request == NULL)
-                return -ENOMEM;
-
         /* According to intel docs the overlay hw may hang (when switching
          * off) without loading the filter coeffs. It is however unclear whether
          * this applies to the disabling of the overlay or to the switching off
@@ -365,10 +333,9 @@ static int intel_overlay_off(struct intel_overlay *overlay)
         flip_addr |= OFC_UPDATE;
 
         ret = intel_ring_begin(ring, 6);
-        if (ret) {
-                kfree(request);
+        if (ret)
                 return ret;
-        }
+
         /* wait for overlay to go idle */
         intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
         intel_ring_emit(ring, flip_addr);
@@ -379,8 +346,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
         intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
         intel_ring_advance(ring);
 
-        return intel_overlay_do_wait_request(overlay, request,
-                                             intel_overlay_off_tail);
+        return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
 }
 
 /* recover from an interruption due to a signal
@@ -425,24 +391,16 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
                 return 0;
 
         if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
-                struct drm_i915_gem_request *request;
-
                 /* synchronous slowpath */
-                request = kzalloc(sizeof(*request), GFP_KERNEL);
-                if (request == NULL)
-                        return -ENOMEM;
-
                 ret = intel_ring_begin(ring, 2);
-                if (ret) {
-                        kfree(request);
+                if (ret)
                         return ret;
-                }
 
                 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
                 intel_ring_emit(ring, MI_NOOP);
                 intel_ring_advance(ring);
 
-                ret = intel_overlay_do_wait_request(overlay, request,
+                ret = intel_overlay_do_wait_request(overlay,
                                                     intel_overlay_release_old_vid_tail);
                 if (ret)
                         return ret;
@@ -3442,8 +3442,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
                    GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 
         /* Bspec says we need to always set all mask bits. */
-        I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
-                   _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);
+        I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
+                   _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
 
         /*
          * According to the spec the following bits should be
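For context on the (0xFFFF << 16) in this hunk: the chicken registers written here are masked registers, where the high 16 bits of a write select which of the low 16 bits take effect, and the in-code comment notes that Bspec wants all mask bits set. A hedged illustration of that convention; the macro below is hypothetical and not introduced by this series:

/* Hypothetical helper illustrating the masked-register write convention:
 * bit n of a write only takes effect when bit (n + 16) is also set in the
 * same write. The patch simply writes the full 0xFFFF mask, per Bspec. */
#define EXAMPLE_MASKED_BIT_ENABLE(bit)  (((bit) << 16) | (bit))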