Mirror of https://github.com/torvalds/linux.git, synced 2024-12-01 00:21:32 +00:00
Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm: index i shadowed in 2nd loop
  drm/nv50-nvc0: prevent multiple vm/bar flushes occuring simultanenously
  drm/nouveau: fix regression causing ttm to not be able to evict vram
  drm/i915: Rebind the buffer if its alignment constraints changes with tiling
  drm/i915: Disable GPU semaphores by default
  drm/i915: Do not overflow the MMADDR write FIFO
  Revert "drm/i915: fix corruptions on i8xx due to relaxed fencing"

commit b44a53d1da
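A reading aid for the i915 "Do not overflow the MMADDR write FIFO" change below: on Gen6 parts the chipset buffers MMIO writes in a small FIFO, so the patch adds __gen6_gt_wait_for_fifo() and routes ring-register writes through i915_gt_write(), which polls the free-entry count before posting a write. The standalone C sketch that follows only mirrors the shape of that guard; the helpers (read_free_entries, mmio_write, delay_us) and the fake FIFO counter are hypothetical stand-ins for illustration, not the kernel API.

#include <stdint.h>
#include <stdio.h>

static unsigned int fake_fifo = 64;        /* pretend free-entry counter */

/* Stands in for reading GT_FIFO_FREE_ENTRIES. */
static uint32_t read_free_entries(void)
{
	return fake_fifo;
}

/* Stands in for I915_WRITE; each posted write consumes one FIFO entry here. */
static void mmio_write(uint32_t reg, uint32_t val)
{
	fake_fifo--;
	printf("write 0x%08x to reg 0x%08x (free entries left: %u)\n",
	       (unsigned)val, (unsigned)reg, fake_fifo);
}

/* Stands in for udelay(). */
static void delay_us(unsigned int us)
{
	(void)us;
}

/* Guarded write: poll the free-entry count and only post the write once the
 * FIFO has headroom, mirroring __gen6_gt_wait_for_fifo() + i915_gt_write(). */
static void guarded_write(uint32_t reg, uint32_t val)
{
	int loop = 500;                    /* bounded spin, as in the patch */
	uint32_t fifo = read_free_entries();

	while (fifo < 20 && loop--) {      /* keep at least 20 entries free */
		delay_us(10);
		fifo = read_free_entries();
	}
	mmio_write(reg, val);
}

int main(void)
{
	guarded_write(0x2030, 0x1);        /* arbitrary register and value */
	return 0;
}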
@@ -672,7 +672,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
 	struct drm_crtc_helper_funcs *crtc_funcs;
 	u16 *red, *green, *blue, *transp;
 	struct drm_crtc *crtc;
-	int i, rc = 0;
+	int i, j, rc = 0;
 	int start;
 
 	for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -685,7 +685,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
 		transp = cmap->transp;
 		start = cmap->start;
 
-		for (i = 0; i < cmap->len; i++) {
+		for (j = 0; j < cmap->len; j++) {
 			u16 hred, hgreen, hblue, htransp = 0xffff;
 
 			hred = *red++;
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		int max_freq;
 
 		/* RPSTAT1 is in the GT power well */
-		__gen6_force_wake_get(dev_priv);
+		__gen6_gt_force_wake_get(dev_priv);
 
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 		seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
@@ -888,7 +888,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 			   max_freq * 100);
 
-		__gen6_force_wake_put(dev_priv);
+		__gen6_gt_force_wake_put(dev_priv);
 	} else {
 		seq_printf(m, "no P-state info available\n");
 	}
@@ -46,6 +46,9 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 unsigned int i915_powersave = 1;
 module_param_named(powersave, i915_powersave, int, 0600);
 
+unsigned int i915_semaphores = 0;
+module_param_named(semaphores, i915_semaphores, int, 0600);
+
 unsigned int i915_enable_rc6 = 0;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 
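With the hunk above, GPU semaphores are off by default; the new "semaphores" parameter (exposed through the standard module-parameter mechanism, e.g. i915.semaphores=1) lets users opt back in, and it is consulted in the execbuffer ring-sync path further down.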
@@ -254,7 +257,7 @@ void intel_detect_pch (struct drm_device *dev)
 	}
 }
 
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
 	int count;
 
@@ -270,12 +273,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
 		udelay(10);
 }
 
-void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE_NOTRACE(FORCEWAKE, 0);
 	POSTING_READ(FORCEWAKE);
 }
 
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+	int loop = 500;
+	u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+	while (fifo < 20 && loop--) {
+		udelay(10);
+		fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+	}
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -956,6 +956,7 @@ extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
 extern unsigned int i915_powersave;
+extern unsigned int i915_semaphores;
 extern unsigned int i915_lvds_downclock;
 extern unsigned int i915_panel_use_ssc;
 extern unsigned int i915_enable_rc6;
@@ -1177,6 +1178,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 void i915_gem_free_all_phys_object(struct drm_device *dev);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
+uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
@@ -1353,22 +1357,32 @@ __i915_write(64, q)
  * must be set to prevent GT core from power down and stale values being
  * returned.
  */
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
-void __gen6_force_wake_put (struct drm_i915_private *dev_priv);
-static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+
+static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
 {
 	u32 val;
 
 	if (dev_priv->info->gen >= 6) {
-		__gen6_force_wake_get(dev_priv);
+		__gen6_gt_force_wake_get(dev_priv);
 		val = I915_READ(reg);
-		__gen6_force_wake_put(dev_priv);
+		__gen6_gt_force_wake_put(dev_priv);
 	} else
 		val = I915_READ(reg);
 
 	return val;
 }
 
+static inline void i915_gt_write(struct drm_i915_private *dev_priv,
+				 u32 reg, u32 val)
+{
+	if (dev_priv->info->gen >= 6)
+		__gen6_gt_wait_for_fifo(dev_priv);
+	I915_WRITE(reg, val);
+}
+
 static inline void
 i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
 {
@@ -1398,7 +1398,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
  * Return the required GTT alignment for an object, only taking into account
  * unfenced tiled surface requirements.
  */
-static uint32_t
+uint32_t
 i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
@@ -772,8 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 	if (from == NULL || to == from)
 		return 0;
 
-	/* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
-	if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
+	/* XXX gpu semaphores are implicated in various hard hangs on SNB */
+	if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
 		return i915_gem_object_wait_rendering(obj, true);
 
 	idx = intel_ring_sync_index(from, to);
@@ -184,7 +184,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 static bool
 i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 {
-	int tile_width, tile_height;
+	int tile_width;
 
 	/* Linear is always fine */
 	if (tiling_mode == I915_TILING_NONE)
@@ -215,20 +215,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 		}
 	}
 
-	if (IS_GEN2(dev) ||
-	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
-		tile_height = 32;
-	else
-		tile_height = 8;
-	/* i8xx is strange: It has 2 interleaved rows of tiles, so needs an even
-	 * number of tile rows. */
-	if (IS_GEN2(dev))
-		tile_height *= 2;
-
-	/* Size needs to be aligned to a full tile row */
-	if (size & (tile_height * stride - 1))
-		return false;
-
 	/* 965+ just needs multiples of tile width */
 	if (INTEL_INFO(dev)->gen >= 4) {
 		if (stride & (tile_width - 1))
@@ -363,14 +349,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 			(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
 			 i915_gem_object_fence_ok(obj, args->tiling_mode));
 
-		obj->tiling_changed = true;
-		obj->tiling_mode = args->tiling_mode;
-		obj->stride = args->stride;
+		/* Rebind if we need a change of alignment */
+		if (!obj->map_and_fenceable) {
+			u32 unfenced_alignment =
+				i915_gem_get_unfenced_gtt_alignment(obj);
+			if (obj->gtt_offset & (unfenced_alignment - 1))
+				ret = i915_gem_object_unbind(obj);
+		}
+
+		if (ret == 0) {
+			obj->tiling_changed = true;
+			obj->tiling_mode = args->tiling_mode;
+			obj->stride = args->stride;
+		}
 	}
 	/* we have to maintain this existing ABI... */
 	args->stride = obj->stride;
 	args->tiling_mode = obj->tiling_mode;
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -3261,6 +3261,8 @@
 #define FORCEWAKE			0xA18C
 #define FORCEWAKE_ACK			0x130090
 
+#define GT_FIFO_FREE_ENTRIES		0x120008
+
 #define GEN6_RPNSWREQ			0xA008
 #define GEN6_TURBO_DISABLE		(1<<31)
 #define GEN6_FREQUENCY(x)		((x)<<25)
@@ -1219,7 +1219,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
 	u32 blt_ecoskpd;
 
 	/* Make sure blitter notifies FBC of writes */
-	__gen6_force_wake_get(dev_priv);
+	__gen6_gt_force_wake_get(dev_priv);
 	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
 	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
 		GEN6_BLITTER_LOCK_SHIFT;
@@ -1230,7 +1230,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
 			 GEN6_BLITTER_LOCK_SHIFT);
 	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
 	POSTING_READ(GEN6_BLITTER_ECOSKPD);
-	__gen6_force_wake_put(dev_priv);
+	__gen6_gt_force_wake_put(dev_priv);
 }
 
 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
@@ -6282,7 +6282,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	 * userspace...
 	 */
 	I915_WRITE(GEN6_RC_STATE, 0);
-	__gen6_force_wake_get(dev_priv);
+	__gen6_gt_force_wake_get(dev_priv);
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6380,7 +6380,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	/* enable all PM interrupts */
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
-	__gen6_force_wake_put(dev_priv);
+	__gen6_gt_force_wake_put(dev_priv);
 }
 
 void intel_enable_clock_gating(struct drm_device *dev)
@@ -14,22 +14,23 @@ struct intel_hw_status_page {
 	struct drm_i915_gem_object *obj;
 };
 
-#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
+#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
+#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
 
 #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val)
 
 #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val)
 
 #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val)
 
 #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val)
 
-#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val)
 
 #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
 #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
@@ -725,8 +725,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 	ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
 			mem->page_alignment << PAGE_SHIFT, size_nc,
 			(nvbo->tile_flags >> 8) & 0xff, &node);
-	if (ret)
-		return ret;
+	if (ret) {
+		mem->mm_node = NULL;
+		return (ret == -ENOSPC) ? 0 : ret;
+	}
 
 	node->page_shift = 12;
 	if (nvbo->vma.node)
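Note on the hunk above: clearing mem->mm_node and returning 0 when the allocator reports -ENOSPC tells TTM that the buffer simply could not be placed, so it can fall back to evicting VRAM instead of failing outright. This is the "fix regression causing ttm to not be able to evict vram" change from the shortlog; the -ENOSPC itself comes from the nouveau_mm_get() hunk that follows.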
@@ -123,7 +123,7 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
 		return 0;
 	}
 
-	return -ENOMEM;
+	return -ENOSPC;
 }
 
 int
@@ -403,16 +403,24 @@ nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
 void
 nv50_instmem_flush(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
 	nv_wr32(dev, 0x00330c, 0x00000001);
 	if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
+	spin_unlock(&dev_priv->ramin_lock);
 }
 
 void
 nv84_instmem_flush(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
 	nv_wr32(dev, 0x070000, 0x00000001);
 	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
+	spin_unlock(&dev_priv->ramin_lock);
 }
 
@@ -169,7 +169,11 @@ nv50_vm_flush(struct nouveau_vm *vm)
 void
 nv50_vm_flush_engine(struct drm_device *dev, int engine)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
 	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
 	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
 		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+	spin_unlock(&dev_priv->ramin_lock);
 }
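The nouveau flush hunks above serialize the write-then-wait flush sequences under dev_priv->ramin_lock so that concurrent callers can no longer interleave them, which is the "prevent multiple vm/bar flushes occuring simultanenously" fix from the shortlog.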