mirror of
https://github.com/torvalds/linux.git
synced 2024-12-24 11:51:27 +00:00
Merge tag 'vmwgfx-fixes-4.3-150924' of git://people.freedesktop.org/~thomash/linux into drm-fixes
Pull request of 2015-09-24

vmwgfx fixes for 4.3:
- A couple of uninitialized variable fixes by Christian Engelmayer
- A TTM fix for a bug that causes problems with the new vmwgfx device init
- A vmwgfx refcounting fix
- A vmwgfx iomem caching fix
- A DRM change to allow also control clients to read the drm driver version.

* tag 'vmwgfx-fixes-4.3-150924' of git://people.freedesktop.org/~thomash/linux:
  drm: Allow also control clients to check the drm version
  drm/vmwgfx: Fix uninitialized return in vmw_kms_helper_dirty()
  drm/vmwgfx: Fix uninitialized return in vmw_cotable_unbind()
  drm/vmwgfx: Only build on X86
  drm/ttm: Fix memory space allocation v2
  drm/vmwgfx: Map the fifo as cached
  drm/vmwgfx: Fix up user_dmabuf refcounting
This commit is contained in:
commit 0a3579e39d
drivers/gpu/drm/drm_ioctl.c

@@ -520,7 +520,8 @@ EXPORT_SYMBOL(drm_ioctl_permit);
 /** Ioctl table */
 static const struct drm_ioctl_desc drm_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
+		      DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
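The one-line change above widens the VERSION entry's flag mask so that control clients pass the permission check too. A minimal userspace sketch of the gating idea; the flag names mirror DRM's, but the client model and ioctl_permit() below are simplified stand-ins, not the kernel's drm_ioctl_permit():

#include <stdbool.h>
#include <stdio.h>

#define F_RENDER_ALLOW  (1u << 0)	/* render clients may call this ioctl */
#define F_CONTROL_ALLOW (1u << 1)	/* control clients may call this ioctl */

enum client_type { CLIENT_LEGACY, CLIENT_RENDER, CLIENT_CONTROL };

static bool ioctl_permit(unsigned int flags, enum client_type client)
{
	switch (client) {
	case CLIENT_RENDER:
		return flags & F_RENDER_ALLOW;
	case CLIENT_CONTROL:
		return flags & F_CONTROL_ALLOW;
	default:
		return true;	/* legacy/primary node: no extra gating here */
	}
}

int main(void)
{
	/* Before the patch the VERSION entry lacked the control-allow bit. */
	unsigned int version_old = F_RENDER_ALLOW;
	unsigned int version_new = F_RENDER_ALLOW | F_CONTROL_ALLOW;

	printf("control client, old flags: %d\n",
	       ioctl_permit(version_old, CLIENT_CONTROL));	/* 0: denied */
	printf("control client, new flags: %d\n",
	       ioctl_permit(version_new, CLIENT_CONTROL));	/* 1: allowed */
	return 0;
}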
drivers/gpu/drm/ttm/ttm_bo.c

@@ -882,6 +882,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
+		if (!man->has_type || !man->use_type)
+			continue;
 
 		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
 					       &cur_flags);
@@ -889,6 +891,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (!type_ok)
 			continue;
 
+		type_found = true;
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
 		/*
@@ -901,12 +904,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (mem_type == TTM_PL_SYSTEM)
 			break;
 
-		if (man->has_type && man->use_type) {
-			type_found = true;
-			ret = (*man->func->get_node)(man, bo, place, mem);
-			if (unlikely(ret))
-				return ret;
-		}
+		ret = (*man->func->get_node)(man, bo, place, mem);
+		if (unlikely(ret))
+			return ret;
+
 		if (mem->mm_node)
 			break;
 	}
@@ -917,9 +918,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		return 0;
 	}
 
-	if (!type_found)
-		return -EINVAL;
-
 	for (i = 0; i < placement->num_busy_placement; ++i) {
 		const struct ttm_place *place = &placement->busy_placement[i];
 
@@ -927,11 +925,12 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
-		if (!man->has_type)
+		if (!man->has_type || !man->use_type)
 			continue;
 		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
 			continue;
 
+		type_found = true;
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
 		/*
@@ -957,8 +956,13 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (ret == -ERESTARTSYS)
 			has_erestartsys = true;
 	}
-	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
-	return ret;
+
+	if (!type_found) {
+		printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
+		return -EINVAL;
+	}
+
+	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
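The point of the ttm_bo_mem_space() rework is what type_found means: -EINVAL should say "no compatible memory type exists at all", while -ENOMEM should say "a compatible type exists but is currently full". A compilable userspace sketch of that distinction; the struct, names, and single-pass loop are simplified stand-ins for the real two-pass code:

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

struct mem_type {
	bool has_type;		/* manager initialized */
	bool use_type;		/* manager enabled */
	bool compatible;	/* placement flags match */
	int free_nodes;		/* space left */
};

static int mem_space(struct mem_type *types, int n)
{
	bool type_found = false;

	for (int i = 0; i < n; ++i) {
		struct mem_type *man = &types[i];

		if (!man->has_type || !man->use_type)
			continue;
		if (!man->compatible)
			continue;

		/* A compatible type exists: any later failure is ENOMEM. */
		type_found = true;
		if (man->free_nodes > 0) {
			man->free_nodes--;
			return 0;
		}
	}
	return type_found ? -ENOMEM : -EINVAL;
}

int main(void)
{
	struct mem_type full = { true, true, true, 0 };
	struct mem_type none = { true, true, false, 8 };

	printf("compatible but full: %d\n", mem_space(&full, 1)); /* -ENOMEM */
	printf("no compatible type: %d\n", mem_space(&none, 1));  /* -EINVAL */
	return 0;
}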
drivers/gpu/drm/vmwgfx/Kconfig

@@ -1,6 +1,6 @@
 config DRM_VMWGFX
 	tristate "DRM driver for VMware Virtual GPU"
-	depends on DRM && PCI
+	depends on DRM && PCI && X86
 	select FB_DEFERRED_IO
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c

@@ -311,7 +311,6 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct ttm_buffer_object *bo = val_buf->bo;
 	struct vmw_fence_obj *fence;
-	int ret;
 
 	if (list_empty(&res->mob_head))
 		return 0;
@@ -328,7 +327,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);
 
-	return ret;
+	return 0;
 }
 
 /**
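Both uninitialized-return fixes in this pull (vmw_cotable_unbind() above and vmw_kms_helper_dirty() below) share one C bug pattern: a local ret that is no longer assigned on every path reaching "return ret". A small standalone illustration with hypothetical functions, not the driver code; most compilers flag the buggy variant when warnings are enabled:

#include <stdio.h>

static int unbind_buggy(int list_empty)
{
	int ret;	/* never assigned on the path below */

	if (list_empty)
		return 0;
	/* ... work that no longer assigns ret ... */
	return ret;	/* garbage: undefined behavior */
}

static int unbind_fixed(int list_empty)
{
	if (list_empty)
		return 0;
	/* ... work ... */
	return 0;	/* explicit success, as in the patch */
}

int main(void)
{
	printf("fixed: %d\n", unbind_fixed(0));
	(void)unbind_buggy;	/* kept only to show the pattern */
	return 0;
}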
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c

@@ -752,12 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	dev_priv->active_master = &dev_priv->fbdev_master;
 
-
-	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
-					       dev_priv->mmio_size);
-
-	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
-					 dev_priv->mmio_size);
+	dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start,
+					    dev_priv->mmio_size);
 
 	if (unlikely(dev_priv->mmio_virt == NULL)) {
 		ret = -ENOMEM;
@@ -913,7 +909,6 @@ out_no_device:
 out_err4:
 	iounmap(dev_priv->mmio_virt);
 out_err3:
-	arch_phys_wc_del(dev_priv->mmio_mtrr);
 	vmw_ttm_global_release(dev_priv);
 out_err0:
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -964,7 +959,6 @@ static int vmw_driver_unload(struct drm_device *dev)
 
 	ttm_object_device_release(&dev_priv->tdev);
 	iounmap(dev_priv->mmio_virt);
-	arch_phys_wc_del(dev_priv->mmio_mtrr);
 	if (dev_priv->ctx.staged_bindings)
 		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 	vmw_ttm_global_release(dev_priv);
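With the fifo now mapped cached instead of write-combined, the MTRR setup disappears, and the load error path and unload path simply lose their matching arch_phys_wc_del() calls. What keeps such edits safe is the kernel's reverse-order goto unwinding convention; a minimal userspace sketch of that convention, with malloc() standing in for ioremap and friends:

#include <stdio.h>
#include <stdlib.h>

static int driver_load(void)
{
	char *mmio, *fifo;
	int ret;

	mmio = malloc(64);	/* stands in for ioremap_cache() */
	if (!mmio) {
		ret = -1;
		goto out_err0;
	}
	fifo = malloc(64);	/* a later acquisition */
	if (!fifo) {
		ret = -1;
		goto out_err1;
	}
	printf("loaded\n");
	free(fifo);
	free(mmio);
	return 0;

out_err1:
	free(mmio);	/* unwind in reverse acquisition order */
out_err0:
	return ret;
}

int main(void)
{
	return driver_load() ? 1 : 0;
}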
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h

@@ -376,7 +376,6 @@ struct vmw_private {
 	uint32_t initial_width;
 	uint32_t initial_height;
 	u32 __iomem *mmio_virt;
-	int mmio_mtrr;
 	uint32_t capabilities;
 	uint32_t max_gmr_ids;
 	uint32_t max_gmr_pages;
@@ -631,7 +630,8 @@ extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 				 uint32_t size,
 				 bool shareable,
 				 uint32_t *handle,
-				 struct vmw_dma_buffer **p_dma_buf);
+				 struct vmw_dma_buffer **p_dma_buf,
+				 struct ttm_base_object **p_base);
 extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 				     struct vmw_dma_buffer *dma_buf,
 				     uint32_t *handle);
@@ -645,7 +645,8 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
 					 uint32_t cur_validate_node);
 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
 extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-				  uint32_t id, struct vmw_dma_buffer **out);
+				  uint32_t id, struct vmw_dma_buffer **out,
+				  struct ttm_base_object **base);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
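The prototype changes thread an optional struct ttm_base_object ** out-parameter through the alloc and lookup interfaces; callers that pass NULL opt out, callers that pass a pointer receive a second result. A standalone sketch of the optional out-parameter convention (names are illustrative, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct buffer { char name[16]; };

static int lookup(const char *key, struct buffer **out, void **p_base)
{
	struct buffer *buf = malloc(sizeof(*buf));

	if (!buf)
		return -1;
	snprintf(buf->name, sizeof(buf->name), "%s", key);
	*out = buf;
	if (p_base)		/* only filled in when the caller asks */
		*p_base = buf;
	return 0;
}

int main(void)
{
	struct buffer *buf;
	void *base;

	if (lookup("gmr0", &buf, NULL) == 0) {	/* old-style caller */
		printf("%s\n", buf->name);
		free(buf);
	}
	if (lookup("gmr1", &buf, &base) == 0) {	/* new-style caller */
		printf("%s (base %p)\n", buf->name, base);
		free(buf);
	}
	return 0;
}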
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c

@@ -1236,7 +1236,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+				     NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
 		ret = -EINVAL;
@@ -1296,7 +1297,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+				     NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
 		ret = -EINVAL;
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c

@@ -1685,7 +1685,6 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
 	struct drm_crtc *crtc;
 	u32 num_units = 0;
 	u32 i, k;
-	int ret;
 
 	dirty->dev_priv = dev_priv;
 
@@ -1711,7 +1710,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
 		if (!dirty->cmd) {
 			DRM_ERROR("Couldn't reserve fifo space "
 				  "for dirty blits.\n");
-			return ret;
+			return -ENOMEM;
 		}
 		memset(dirty->cmd, 0, dirty->fifo_reserve_size);
 	}
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c

@@ -484,7 +484,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
-	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
+	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
 	if (ret)
 		goto out_unlock;
 
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c

@@ -354,7 +354,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 	}
 
 	*out_surf = NULL;
-	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
 	return ret;
 }
 
@@ -481,7 +481,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 			  uint32_t size,
 			  bool shareable,
 			  uint32_t *handle,
-			  struct vmw_dma_buffer **p_dma_buf)
+			  struct vmw_dma_buffer **p_dma_buf,
+			  struct ttm_base_object **p_base)
 {
 	struct vmw_user_dma_buffer *user_bo;
 	struct ttm_buffer_object *tmp;
@@ -515,6 +516,10 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 	}
 
 	*p_dma_buf = &user_bo->dma;
+	if (p_base) {
+		*p_base = &user_bo->prime.base;
+		kref_get(&(*p_base)->refcount);
+	}
 	*handle = user_bo->prime.base.hash.key;
 
 out_no_base_object:
@@ -631,6 +636,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 	struct vmw_dma_buffer *dma_buf;
 	struct vmw_user_dma_buffer *user_bo;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct ttm_base_object *buffer_base;
 	int ret;
 
 	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -643,7 +649,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 
 	switch (arg->op) {
 	case drm_vmw_synccpu_grab:
-		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
+					     &buffer_base);
 		if (unlikely(ret != 0))
 			return ret;
 
@@ -651,6 +658,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 				       dma);
 		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
 		vmw_dmabuf_unreference(&dma_buf);
+		ttm_base_object_unref(&buffer_base);
 		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
 			     ret != -EBUSY)) {
 			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
@@ -692,7 +700,8 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
-				    req->size, false, &handle, &dma_buf);
+				    req->size, false, &handle, &dma_buf,
+				    NULL);
 	if (unlikely(ret != 0))
 		goto out_no_dmabuf;
 
@@ -721,7 +730,8 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 }
 
 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-			   uint32_t handle, struct vmw_dma_buffer **out)
+			   uint32_t handle, struct vmw_dma_buffer **out,
+			   struct ttm_base_object **p_base)
 {
 	struct vmw_user_dma_buffer *vmw_user_bo;
 	struct ttm_base_object *base;
@@ -743,7 +753,10 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
 				   prime.base);
 	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
-	ttm_base_object_unref(&base);
+	if (p_base)
+		*p_base = base;
+	else
+		ttm_base_object_unref(&base);
 	*out = &vmw_user_bo->dma;
 
 	return 0;
@@ -1004,7 +1017,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
 
 	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
 				    args->size, false, &args->handle,
-				    &dma_buf);
+				    &dma_buf, NULL);
 	if (unlikely(ret != 0))
 		goto out_no_dmabuf;
 
@@ -1032,7 +1045,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
 	struct vmw_dma_buffer *out_buf;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
+	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
 	if (ret != 0)
 		return -EINVAL;
 
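The heart of the refcounting fix is in vmw_user_dmabuf_lookup() above: when the caller asks for the base object through p_base, the lookup keeps the reference it already holds and ownership passes to the caller, who must drop it later, as the synccpu ioctl now does with ttm_base_object_unref(). A minimal userspace model of that get-in-lookup/put-by-caller contract, with a kref-like counter; names are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct base_object {
	int refcount;
};

static void base_get(struct base_object *b) { b->refcount++; }

static void base_put(struct base_object **pb)
{
	if (--(*pb)->refcount == 0) {
		free(*pb);
		printf("freed\n");
	}
	*pb = NULL;
}

/* Internal reference is dropped unless ownership moves to the caller. */
static void lookup(struct base_object *obj, struct base_object **p_base)
{
	base_get(obj);			/* reference held during lookup */
	if (p_base)
		*p_base = obj;		/* ownership passes to the caller */
	else
		base_put(&obj);		/* nobody asked: drop it here */
}

int main(void)
{
	struct base_object *obj = calloc(1, sizeof(*obj));
	struct base_object *base;

	obj->refcount = 1;	/* creator's reference */
	lookup(obj, &base);	/* caller now holds a reference... */
	base_put(&base);	/* ...and must drop it when done */
	base_put(&obj);		/* creator drops the last reference */
	return 0;
}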
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c

@@ -855,7 +855,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
 
 	if (buffer_handle != SVGA3D_INVALID_ID) {
 		ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
-					     &buffer);
+					     &buffer, NULL);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Could not find buffer for shader "
 				  "creation.\n");
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c

@@ -46,6 +46,7 @@ struct vmw_user_surface {
 	struct vmw_surface srf;
 	uint32_t size;
 	struct drm_master *master;
+	struct ttm_base_object *backup_base;
 };
 
 /**
@@ -656,6 +657,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
 	struct vmw_resource *res = &user_srf->srf.res;
 
 	*p_base = NULL;
+	ttm_base_object_unref(&user_srf->backup_base);
 	vmw_resource_unreference(&res);
 }
 
@@ -851,7 +853,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 					    res->backup_size,
 					    true,
 					    &backup_handle,
-					    &res->backup);
+					    &res->backup,
+					    &user_srf->backup_base);
 		if (unlikely(ret != 0)) {
 			vmw_resource_unreference(&res);
 			goto out_unlock;
@@ -1321,7 +1324,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 
 	if (req->buffer_handle != SVGA3D_INVALID_ID) {
 		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
-					     &res->backup);
+					     &res->backup,
+					     &user_srf->backup_base);
 		if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
 		    res->backup_size) {
 			DRM_ERROR("Surface backup buffer is too small.\n");
@@ -1335,7 +1339,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 					     req->drm_surface_flags &
 					     drm_vmw_surface_flag_shareable,
 					     &backup_handle,
-					     &res->backup);
+					     &res->backup,
+					     &user_srf->backup_base);
 
 	if (unlikely(ret != 0)) {
 		vmw_resource_unreference(&res);