drm/i915: fixup __iomem mixups in ringbuffer.c
Two things:

- ring->virtual_start is an __iomem pointer, treat it accordingly.
- dev_priv->status_page.page_addr is now always a cpu addr, no
  pointer casting needed for that.

Take the opportunity to remove the unnecessary drm indirection when
setting up the ringbuffer iomapping.

v2: Add a compiler barrier before reading the hw status page.

Acked-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit 4225d0f219 (parent 316d388450)
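The whole patch follows one rule: an address obtained from ioremap()/ioremap_wc() carries sparse's __iomem address-space tag and may only be touched through the io accessors, while a plain CPU address (like the status page, which lives in a kmapped GEM object) is dereferenced normally. A minimal sketch of the two access styles, not taken from the patch itself — "demo_ring" and both functions are invented names:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative sketch only; demo_* names are made up. */
struct demo_ring {
	void __iomem *virtual_start;	/* from ioremap_wc() */
	u32 *status_page;		/* plain CPU address (kmapped page) */
};

static void demo_emit(struct demo_ring *ring, unsigned int offset, u32 cmd)
{
	/* __iomem memory must go through the io accessors ... */
	iowrite32(cmd, ring->virtual_start + offset);
}

static u32 demo_status(const struct demo_ring *ring, int reg)
{
	/* ... while a CPU pointer is simply dereferenced. */
	return ring->status_page[reg];
}

sparse (run via make C=1) enforces the split: mixing the two address spaces without a __force cast produces warnings, which is exactly the class of mixup the patch title refers to.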
@@ -256,7 +256,7 @@ static int i915_dma_resume(struct drm_device * dev)
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 
-	if (ring->map.handle == NULL) {
+	if (ring->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
 	}
@@ -977,20 +977,14 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	if (ret)
 		goto err_unref;
 
-	ring->map.size = ring->size;
-	ring->map.offset = dev->agp->base + obj->gtt_offset;
-	ring->map.type = 0;
-	ring->map.flags = 0;
-	ring->map.mtrr = 0;
-	drm_core_ioremap_wc(&ring->map, dev);
-	if (ring->map.handle == NULL) {
+	ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
+					 ring->size);
+	if (ring->virtual_start == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
 		ret = -EINVAL;
 		goto err_unpin;
 	}
 
-	ring->virtual_start = ring->map.handle;
-
 	ret = ring->init(ring);
 	if (ret)
 		goto err_unmap;
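The hunk above is the heart of the cleanup: five drm_local_map_t field assignments plus drm_core_ioremap_wc()/drm_core_ioremapfree() collapse into a direct ioremap_wc()/iounmap() pair. A hedged sketch of the resulting mapping lifetime, with base and size standing in for dev->agp->base + obj->gtt_offset and ring->size, and demo_* names invented:

#include <linux/io.h>
#include <linux/types.h>

static void __iomem *demo_map_ring(resource_size_t base, unsigned long size)
{
	/* write-combining mapping of the ring; NULL on failure */
	return ioremap_wc(base, size);
}

static void demo_unmap_ring(void __iomem *virt)
{
	iounmap(virt);	/* the direct replacement for drm_core_ioremapfree() */
}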
@@ -1006,7 +1000,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	return 0;
 
 err_unmap:
-	drm_core_ioremapfree(&ring->map, dev);
+	iounmap(ring->virtual_start);
 err_unpin:
 	i915_gem_object_unpin(obj);
 err_unref:
@@ -1034,7 +1028,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
 	I915_WRITE_CTL(ring, 0);
 
-	drm_core_ioremapfree(&ring->map, ring->dev);
+	iounmap(ring->virtual_start);
 
 	i915_gem_object_unpin(ring->obj);
 	drm_gem_object_unreference(&ring->obj->base);
@@ -1048,7 +1042,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 {
-	unsigned int *virt;
+	uint32_t __iomem *virt;
 	int rem = ring->size - ring->tail;
 
 	if (ring->space < rem) {
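Retyping virt from unsigned int * to uint32_t __iomem * is what lets sparse police the loop body in the next hunk: with the annotation in place, the old direct store is flagged while the accessor form passes. A sketch, assuming a sparse run via make C=1 and an invented demo_poke() helper:

#include <linux/io.h>
#include "i915_drv.h"	/* for MI_NOOP */

static void demo_poke(u32 __iomem *virt)
{
	/* *virt = MI_NOOP; -- sparse would warn here:
	 *   "warning: dereference of noderef expression"
	 */
	iowrite32(MI_NOOP, virt);	/* clean: goes through the accessor */
}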
@@ -1057,12 +1051,10 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 		return ret;
 	}
 
-	virt = (unsigned int *)(ring->virtual_start + ring->tail);
-	rem /= 8;
-	while (rem--) {
-		*virt++ = MI_NOOP;
-		*virt++ = MI_NOOP;
-	}
+	virt = ring->virtual_start + ring->tail;
+	rem /= 4;
+	while (rem--)
+		iowrite32(MI_NOOP, virt++);
 
 	ring->tail = 0;
 	ring->space = ring_space(ring);
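Note the divisor change: the old loop emitted two MI_NOOPs per iteration (8 bytes), while the rewritten loop emits one 4-byte iowrite32() per iteration, so both fill the same rem bytes from tail to the end of the ring. A self-contained sketch of that padding logic (demo_pad_to_wrap is an invented name):

#include <linux/io.h>
#include "i915_drv.h"	/* for MI_NOOP */

static void demo_pad_to_wrap(void __iomem *virtual_start,
			     unsigned int tail, unsigned int size)
{
	u32 __iomem *virt = virtual_start + tail;
	int rem = (size - tail) / 4;	/* one 4-byte nop per iteration */

	while (rem--)
		iowrite32(MI_NOOP, virt++);
}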
@@ -1427,20 +1419,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	if (IS_I830(ring->dev))
 		ring->effective_size -= 128;
 
-	ring->map.offset = start;
-	ring->map.size = size;
-	ring->map.type = 0;
-	ring->map.flags = 0;
-	ring->map.mtrr = 0;
-	drm_core_ioremap_wc(&ring->map, dev);
-	if (ring->map.handle == NULL) {
+	ring->virtual_start = ioremap_wc(start, size);
+	if (ring->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
 	}
 
-	ring->virtual_start = (void __force __iomem *)ring->map.handle;
-
 	return 0;
 }
 
@@ -2,7 +2,7 @@
 #define _INTEL_RINGBUFFER_H_
 
 struct intel_hw_status_page {
-	u32 __iomem *page_addr;
+	u32 *page_addr;
 	unsigned int gfx_addr;
 	struct drm_i915_gem_object *obj;
 };
@@ -115,7 +115,6 @@ struct intel_ring_buffer {
 	u32 outstanding_lazy_request;
 
 	wait_queue_head_t irq_queue;
-	drm_local_map_t map;
 
 	void *private;
 };
@@ -149,7 +148,9 @@ static inline u32
 intel_read_status_page(struct intel_ring_buffer *ring,
 		       int reg)
 {
-	return ioread32(ring->status_page.page_addr + reg);
+	/* Ensure that the compiler doesn't optimize away the load. */
+	barrier();
+	return ring->status_page.page_addr[reg];
 }
 
 /**
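This final hunk is the v2 change. page_addr is now a plain cached CPU pointer, so the ordering guarantee that came for free with ioread32() (a volatile access) is gone; without barrier(), the compiler would be free to hoist the load out of a wait loop and spin on a stale value forever. A sketch of the failure mode and the fix, with an invented demo_read_status() name:

#include <linux/compiler.h>
#include <linux/types.h>

static inline u32 demo_read_status(const u32 *page_addr, int reg)
{
	/* Without this, a caller polling
	 *	while (demo_read_status(addr, reg) != seqno) ;
	 * could be compiled into a single load followed by an
	 * endless spin on the cached result.
	 */
	barrier();
	return page_addr[reg];
}

On a contemporary kernel the same intent would more likely be spelled READ_ONCE(page_addr[reg]); the patch predates that helper.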