drm/i915: Fix dynamic allocation of physical handles
A single object may be referenced by multiple registers, fundamentally
breaking the static allotment of ids in the current design. When the
object is used a second time, the physical address of the first
assignment is relinquished and a second one granted. However, the
hardware is still reading (and possibly writing) from the old physical
address, now returned to the system. Eventually hilarity will ensue, but
in the short term it just means that cursors are broken when using more
than one pipe.

v2: Fix up leak of pci handle when handling an error during attachment,
and avoid a double kmap/kunmap. (Ville)
Rebase against -fixes.

v3: And fix the error handling added in v2 (Ville)

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=77351
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: stable@vger.kernel.org
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit 00731155a7 (parent c7208164e6)
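For orientation, an editorial sketch (not part of the commit itself) of how the
calling convention changes; it condenses the intel_crtc_cursor_set() hunk from
intel_display.c below, with error handling elided:

	/* Before: each consumer claimed one of the static slots
	 * (I915_GEM_PHYS_CURSOR_0/1, I915_GEM_PHYS_OVERLAY_REGS), so using
	 * the same object from a second register displaced the first
	 * assignment's physical pages while the hardware still read them. */
	ret = i915_gem_attach_phys_object(dev, obj,
					  (intel_crtc->pipe == 0) ?
					  I915_GEM_PHYS_CURSOR_0 :
					  I915_GEM_PHYS_CURSOR_1,
					  align);
	addr = obj->phys_obj->handle->busaddr;

	/* After: the object owns its own drm_dma_handle_t, allocated on
	 * demand and freed with the object, so both pipes can hold
	 * physical cursors at once. */
	ret = i915_gem_object_attach_phys(obj, align);
	addr = obj->phys_handle->busaddr;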
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1833,7 +1833,6 @@ int i915_driver_unload(struct drm_device *dev)
 	flush_workqueue(dev_priv->wq);
 
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_free_all_phys_object(dev);
 	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
 	WARN_ON(dev_priv->mm.aliasing_ppgtt);
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -242,18 +242,6 @@ struct intel_ddi_plls {
 #define WATCH_LISTS	0
 #define WATCH_GTT	0
 
-#define I915_GEM_PHYS_CURSOR_0 1
-#define I915_GEM_PHYS_CURSOR_1 2
-#define I915_GEM_PHYS_OVERLAY_REGS 3
-#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
-
-struct drm_i915_gem_phys_object {
-	int id;
-	struct page **page_list;
-	drm_dma_handle_t *handle;
-	struct drm_i915_gem_object *cur_obj;
-};
-
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
@@ -1187,9 +1175,6 @@ struct i915_gem_mm {
 	/** Bit 6 swizzling required for Y tiling */
 	uint32_t bit_6_swizzle_y;
 
-	/* storage for physical objects */
-	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
-
 	/* accounting, useful for userland debugging */
 	spinlock_t object_stat_lock;
 	size_t object_memory;
@@ -1769,7 +1754,7 @@ struct drm_i915_gem_object {
 	struct drm_file *pin_filp;
 
 	/** for phy allocated objects */
-	struct drm_i915_gem_phys_object *phys_obj;
+	drm_dma_handle_t *phys_handle;
 };
 
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -2334,13 +2319,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     struct intel_ring_buffer *pipelined);
 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
-int i915_gem_attach_phys_object(struct drm_device *dev,
-				struct drm_i915_gem_object *obj,
-				int id,
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 				int align);
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_i915_gem_object *obj);
-void i915_gem_free_all_phys_object(struct drm_device *dev);
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,10 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			       bool readonly);
-static int i915_gem_phys_pwrite(struct drm_device *dev,
-				struct drm_i915_gem_object *obj,
-				struct drm_i915_gem_pwrite *args,
-				struct drm_file *file);
 
 static void i915_gem_write_fence(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj);
@@ -209,6 +205,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+{
+	drm_dma_handle_t *phys = obj->phys_handle;
+
+	if (!phys)
+		return;
+
+	if (obj->madv == I915_MADV_WILLNEED) {
+		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+		char *vaddr = phys->vaddr;
+		int i;
+
+		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+			struct page *page = shmem_read_mapping_page(mapping, i);
+			if (!IS_ERR(page)) {
+				char *dst = kmap_atomic(page);
+				memcpy(dst, vaddr, PAGE_SIZE);
+				drm_clflush_virt_range(dst, PAGE_SIZE);
+				kunmap_atomic(dst);
+
+				set_page_dirty(page);
+				mark_page_accessed(page);
+				page_cache_release(page);
+			}
+			vaddr += PAGE_SIZE;
+		}
+		i915_gem_chipset_flush(obj->base.dev);
+	}
+
+#ifdef CONFIG_X86
+	set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+	drm_pci_free(obj->base.dev, phys);
+	obj->phys_handle = NULL;
+}
+
+int
+i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+			    int align)
+{
+	drm_dma_handle_t *phys;
+	struct address_space *mapping;
+	char *vaddr;
+	int i;
+
+	if (obj->phys_handle) {
+		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
+			return -EBUSY;
+
+		return 0;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED)
+		return -EFAULT;
+
+	if (obj->base.filp == NULL)
+		return -EINVAL;
+
+	/* create a new object */
+	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
+	if (!phys)
+		return -ENOMEM;
+
+	vaddr = phys->vaddr;
+#ifdef CONFIG_X86
+	set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
+#endif
+	mapping = file_inode(obj->base.filp)->i_mapping;
+	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+		struct page *page;
+		char *src;
+
+		page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page)) {
+#ifdef CONFIG_X86
+			set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+			drm_pci_free(obj->base.dev, phys);
+			return PTR_ERR(page);
+		}
+
+		src = kmap_atomic(page);
+		memcpy(vaddr, src, PAGE_SIZE);
+		kunmap_atomic(src);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+
+		vaddr += PAGE_SIZE;
+	}
+
+	obj->phys_handle = phys;
+	return 0;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pwrite *args,
+		     struct drm_file *file_priv)
+{
+	struct drm_device *dev = obj->base.dev;
+	void *vaddr = obj->phys_handle->vaddr + args->offset;
+	char __user *user_data = to_user_ptr(args->data_ptr);
+
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
+
+	i915_gem_chipset_flush(dev);
+	return 0;
+}
+
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -921,8 +1039,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj->phys_obj) {
-		ret = i915_gem_phys_pwrite(dev, obj, args, file);
+	if (obj->phys_handle) {
+		ret = i915_gem_phys_pwrite(obj, args, file);
 		goto out;
 	}
 
@@ -4163,9 +4281,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	trace_i915_gem_object_destroy(obj);
 
-	if (obj->phys_obj)
-		i915_gem_detach_phys_object(dev, obj);
-
 	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
 		int ret;
 
@@ -4183,6 +4298,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		}
 	}
 
+	i915_gem_object_detach_phys(obj);
+
 	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
 	 * before progressing. */
 	if (obj->stolen)
@@ -4646,190 +4763,6 @@ i915_gem_load(struct drm_device *dev)
 	register_shrinker(&dev_priv->mm.inactive_shrinker);
 }
 
-/*
- * Create a physically contiguous memory object for this object
- * e.g. for cursor + overlay regs
- */
-static int i915_gem_init_phys_object(struct drm_device *dev,
-				     int id, int size, int align)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-	int ret;
-
-	if (dev_priv->mm.phys_objs[id - 1] || !size)
-		return 0;
-
-	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
-	if (!phys_obj)
-		return -ENOMEM;
-
-	phys_obj->id = id;
-
-	phys_obj->handle = drm_pci_alloc(dev, size, align);
-	if (!phys_obj->handle) {
-		ret = -ENOMEM;
-		goto kfree_obj;
-	}
-#ifdef CONFIG_X86
-	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-
-	dev_priv->mm.phys_objs[id - 1] = phys_obj;
-
-	return 0;
-kfree_obj:
-	kfree(phys_obj);
-	return ret;
-}
-
-static void i915_gem_free_phys_object(struct drm_device *dev, int id)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-
-	if (!dev_priv->mm.phys_objs[id - 1])
-		return;
-
-	phys_obj = dev_priv->mm.phys_objs[id - 1];
-	if (phys_obj->cur_obj) {
-		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
-	}
-
-#ifdef CONFIG_X86
-	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-	drm_pci_free(dev, phys_obj->handle);
-	kfree(phys_obj);
-	dev_priv->mm.phys_objs[id - 1] = NULL;
-}
-
-void i915_gem_free_all_phys_object(struct drm_device *dev)
-{
-	int i;
-
-	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
-		i915_gem_free_phys_object(dev, i);
-}
-
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_i915_gem_object *obj)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	char *vaddr;
-	int i;
-	int page_count;
-
-	if (!obj->phys_obj)
-		return;
-	vaddr = obj->phys_obj->handle->vaddr;
-
-	page_count = obj->base.size / PAGE_SIZE;
-	for (i = 0; i < page_count; i++) {
-		struct page *page = shmem_read_mapping_page(mapping, i);
-		if (!IS_ERR(page)) {
-			char *dst = kmap_atomic(page);
-			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
-			kunmap_atomic(dst);
-
-			drm_clflush_pages(&page, 1);
-
-			set_page_dirty(page);
-			mark_page_accessed(page);
-			page_cache_release(page);
-		}
-	}
-	i915_gem_chipset_flush(dev);
-
-	obj->phys_obj->cur_obj = NULL;
-	obj->phys_obj = NULL;
-}
-
-int
-i915_gem_attach_phys_object(struct drm_device *dev,
-			    struct drm_i915_gem_object *obj,
-			    int id,
-			    int align)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-	int page_count;
-	int i;
-
-	if (id > I915_MAX_PHYS_OBJECT)
-		return -EINVAL;
-
-	if (obj->phys_obj) {
-		if (obj->phys_obj->id == id)
-			return 0;
-		i915_gem_detach_phys_object(dev, obj);
-	}
-
-	/* create a new object */
-	if (!dev_priv->mm.phys_objs[id - 1]) {
-		ret = i915_gem_init_phys_object(dev, id,
-						obj->base.size, align);
-		if (ret) {
-			DRM_ERROR("failed to init phys object %d size: %zu\n",
-				  id, obj->base.size);
-			return ret;
-		}
-	}
-
-	/* bind to the object */
-	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
-	obj->phys_obj->cur_obj = obj;
-
-	page_count = obj->base.size / PAGE_SIZE;
-
-	for (i = 0; i < page_count; i++) {
-		struct page *page;
-		char *dst, *src;
-
-		page = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
-
-		src = kmap_atomic(page);
-		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(src);
-
-		mark_page_accessed(page);
-		page_cache_release(page);
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_phys_pwrite(struct drm_device *dev,
-		     struct drm_i915_gem_object *obj,
-		     struct drm_i915_gem_pwrite *args,
-		     struct drm_file *file_priv)
-{
-	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
-	char __user *user_data = to_user_ptr(args->data_ptr);
-
-	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-		unsigned long unwritten;
-
-		/* The physical object once assigned is fixed for the lifetime
-		 * of the obj, so we can safely drop the lock and continue
-		 * to access vaddr.
-		 */
-		mutex_unlock(&dev->struct_mutex);
-		unwritten = copy_from_user(vaddr, user_data, args->size);
-		mutex_lock(&dev->struct_mutex);
-		if (unwritten)
-			return -EFAULT;
-	}
-
-	i915_gem_chipset_flush(dev);
-	return 0;
-}
-
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7825,14 +7825,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 		addr = i915_gem_obj_ggtt_offset(obj);
 	} else {
 		int align = IS_I830(dev) ? 16 * 1024 : 256;
-		ret = i915_gem_attach_phys_object(dev, obj,
-						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
-						  align);
+		ret = i915_gem_object_attach_phys(obj, align);
 		if (ret) {
 			DRM_DEBUG_KMS("failed to attach phys object\n");
 			goto fail_locked;
 		}
-		addr = obj->phys_obj->handle->busaddr;
+		addr = obj->phys_handle->busaddr;
 	}
 
 	if (IS_GEN2(dev))
@@ -7840,10 +7838,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
 finish:
 	if (intel_crtc->cursor_bo) {
-		if (INTEL_INFO(dev)->cursor_needs_physical) {
-			if (intel_crtc->cursor_bo != obj)
-				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
-		} else
+		if (!INTEL_INFO(dev)->cursor_needs_physical)
 			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
 		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
 	}
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
 	struct overlay_registers __iomem *regs;
 
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
+		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
 	else
 		regs = io_mapping_map_wc(dev_priv->gtt.mappable,
 					 i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev)
 	overlay->reg_bo = reg_bo;
 
 	if (OVERLAY_NEEDS_PHYSICAL(dev)) {
-		ret = i915_gem_attach_phys_object(dev, reg_bo,
-						  I915_GEM_PHYS_OVERLAY_REGS,
-						  PAGE_SIZE);
+		ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
 		if (ret) {
 			DRM_ERROR("failed to attach phys overlay regs\n");
 			goto out_free_bo;
 		}
-		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
+		overlay->flip_addr = reg_bo->phys_handle->busaddr;
 	} else {
 		ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
 		if (ret) {
@@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 		/* Cast to make sparse happy, but it's wc memory anyway, so
 		 * equivalent to the wc io mapping on X86. */
 		regs = (struct overlay_registers __iomem *)
-			overlay->reg_bo->phys_obj->handle->vaddr;
+			overlay->reg_bo->phys_handle->vaddr;
 	else
 		regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
 						i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
 	error->dovsta = I915_READ(DOVSTA);
 	error->isr = I915_READ(ISR);
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-		error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
+		error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
 	else
 		error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);