Merge tag 'drm-misc-fixes-2023-10-12' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

Short summary of fixes pull:

 * atomic-helper: Relax checks for unregistered connectors
 * dma-buf: Work around race condition when retrieving fence timestamp
 * gem: Avoid OOB access in BO memory range
 * panel:
   * boe-tv101wum-nl6: Fix flickering
 * simpledrm: Fix error output
 * vmwgfx:
   * Fix size calculation in texture-state code
   * Ref GEM BOs in surfaces

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20231012111638.GA25037@linux-uq9g
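The dma-buf entry concerns the window between a fence being flagged as signaled and its timestamp being written. As a minimal standalone sketch (plain C11, not kernel code; all names are made up for illustration), the publish-then-wait pattern that the series formalizes in dma_fence_timestamp() looks roughly like this:

#include <stdatomic.h>
#include <sched.h>
#include <time.h>

/* Hypothetical stand-in for struct dma_fence's two flag bits. */
struct fake_fence {
	atomic_bool signaled;   /* set first by the signaling thread */
	atomic_bool has_stamp;  /* set only after 'stamp' is written */
	struct timespec stamp;
};

/* Mirrors the busy-wait in the new dma_fence_timestamp() helper: a
 * reader that saw 'signaled' spins until the timestamp is published. */
static struct timespec fake_fence_timestamp(struct fake_fence *f)
{
	while (!atomic_load(&f->has_stamp))
		sched_yield();  /* the kernel version uses cpu_relax() */
	return f->stamp;
}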
diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
@@ -76,16 +76,11 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
 		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
 			if (!dma_fence_is_signaled(tmp)) {
 				++count;
-			} else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
-					    &tmp->flags)) {
-				if (ktime_after(tmp->timestamp, timestamp))
-					timestamp = tmp->timestamp;
 			} else {
-				/*
-				 * Use the current time if the fence is
-				 * currently signaling.
-				 */
-				timestamp = ktime_get();
+				ktime_t t = dma_fence_timestamp(tmp);
+
+				if (ktime_after(t, timestamp))
+					timestamp = t;
 			}
 		}
 	}
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
@@ -268,13 +268,10 @@ static int sync_fill_fence_info(struct dma_fence *fence,
 		sizeof(info->driver_name));
 
 	info->status = dma_fence_get_status(fence);
-	while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
-	       !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
-		cpu_relax();
 	info->timestamp_ns =
-		test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
-		ktime_to_ns(fence->timestamp) :
-		ktime_set(0, 0);
+		dma_fence_is_signaled(fence) ?
+		ktime_to_ns(dma_fence_timestamp(fence)) :
+		ktime_set(0, 0);
 
 	return info->status;
 }
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
@@ -290,7 +290,8 @@ static int
 update_connector_routing(struct drm_atomic_state *state,
			 struct drm_connector *connector,
			 struct drm_connector_state *old_connector_state,
-			 struct drm_connector_state *new_connector_state)
+			 struct drm_connector_state *new_connector_state,
+			 bool added_by_user)
 {
	const struct drm_connector_helper_funcs *funcs;
	struct drm_encoder *new_encoder;
@@ -339,9 +340,13 @@ update_connector_routing(struct drm_atomic_state *state,
	 * there's a chance the connector may have been destroyed during the
	 * process, but it's better to ignore that then cause
	 * drm_atomic_helper_resume() to fail.
+	 *
+	 * Last, we want to ignore connector registration when the connector
+	 * was not pulled in the atomic state by user-space (ie, was pulled
+	 * in by the driver, e.g. when updating a DP-MST stream).
	 */
	if (!state->duplicated && drm_connector_is_unregistered(connector) &&
-	    crtc_state->active) {
+	    added_by_user && crtc_state->active) {
		drm_dbg_atomic(connector->dev,
			       "[CONNECTOR:%d:%s] is not registered\n",
			       connector->base.id, connector->name);
@@ -620,7 +625,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
	struct drm_connector *connector;
	struct drm_connector_state *old_connector_state, *new_connector_state;
	int i, ret;
-	unsigned int connectors_mask = 0;
+	unsigned int connectors_mask = 0, user_connectors_mask = 0;
+
+	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
+		user_connectors_mask |= BIT(i);
 
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		bool has_connectors =
@@ -685,7 +693,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
		 */
		ret = update_connector_routing(state, connector,
					       old_connector_state,
-					       new_connector_state);
+					       new_connector_state,
+					       BIT(i) & user_connectors_mask);
		if (ret)
			return ret;
		if (old_connector_state->crtc) {
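To make the atomic-helper change concrete, here is a minimal standalone sketch (plain C with hypothetical connector counts, not the DRM code itself) of how the per-index bitmask distinguishes connectors user-space put into the state from ones a driver pulled in later:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int user_connectors_mask = 0;
	int i;

	/* Suppose connectors 0..2 were already in the state when the
	 * check started, i.e. user-space added them... */
	for (i = 0; i < 3; i++)
		user_connectors_mask |= BIT(i);

	/* ...while connectors 3..4 were pulled in by the driver later,
	 * so their bits stay clear and the registration check is skipped. */
	for (i = 0; i < 5; i++)
		printf("connector %d: added_by_user=%d\n",
		       i, !!(BIT(i) & user_connectors_mask));
	return 0;
}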
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
@@ -540,7 +540,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
	struct page **pages;
	struct folio *folio;
	struct folio_batch fbatch;
-	int i, j, npages;
+	long i, j, npages;
 
	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);
@@ -564,11 +564,13 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 
	i = 0;
	while (i < npages) {
+		long nr;
		folio = shmem_read_folio_gfp(mapping, i,
				mapping_gfp_mask(mapping));
		if (IS_ERR(folio))
			goto fail;
-		for (j = 0; j < folio_nr_pages(folio); j++, i++)
+		nr = min(npages - i, folio_nr_pages(folio));
+		for (j = 0; j < nr; j++, i++)
			pages[i] = folio_file_page(folio, i);
 
		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
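The gem fix guards against a folio that extends past the end of the BO's pages[] array. A minimal standalone sketch of the arithmetic (hypothetical sizes, plain C rather than kernel code):

#include <stdio.h>

static long min_long(long a, long b) { return a < b ? a : b; }

int main(void)
{
	long npages = 5;       /* capacity of the pages[] array */
	long folio_pages = 8;  /* one folio can span more pages than that */
	long i = 0, j, nr;

	/* The old loop bound was folio_pages, which would have written
	 * pages[5..7] out of bounds; the fix clamps the copy to what is
	 * left in the array. */
	nr = min_long(npages - i, folio_pages);
	for (j = 0; j < nr; j++, i++)
		printf("pages[%ld] filled\n", i);
	return 0;
}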
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1342,9 +1342,7 @@ static const struct panel_init_cmd starry_himax83102_j02_init_cmd[] = {
	_INIT_DCS_CMD(0xB1, 0x01, 0xBF, 0x11),
	_INIT_DCS_CMD(0xCB, 0x86),
	_INIT_DCS_CMD(0xD2, 0x3C, 0xFA),
-	_INIT_DCS_CMD(0xE9, 0xC5),
-	_INIT_DCS_CMD(0xD3, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0C, 0x01),
-	_INIT_DCS_CMD(0xE9, 0x3F),
+	_INIT_DCS_CMD(0xD3, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0C, 0x01),
	_INIT_DCS_CMD(0xE7, 0x02, 0x00, 0x28, 0x01, 0x7E, 0x0F, 0x7E, 0x10, 0xA0, 0x00, 0x00, 0x20, 0x40, 0x50, 0x40),
	_INIT_DCS_CMD(0xBD, 0x02),
	_INIT_DCS_CMD(0xD8, 0xFF, 0xFF, 0xBF, 0xFE, 0xAA, 0xA0, 0xFF, 0xFF, 0xBF, 0xFE, 0xAA, 0xA0),
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
@@ -929,7 +929,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 
	if (next) {
		next->s_fence->scheduled.timestamp =
-			job->s_fence->finished.timestamp;
+			dma_fence_timestamp(&job->s_fence->finished);
		/* start TO timer for next job */
		drm_sched_start_timeout(sched);
	}
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
@@ -745,7 +745,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
 
		ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res));
		if (ret) {
-			drm_err(dev, "could not acquire memory range %pr: %d\n", &res, ret);
+			drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret);
			return ERR_PTR(ret);
		}
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -34,6 +34,8 @@
 
 static void vmw_bo_release(struct vmw_bo *vbo)
 {
+	WARN_ON(vbo->tbo.base.funcs &&
+		kref_read(&vbo->tbo.base.refcount) != 0);
	vmw_bo_unmap(vbo);
	drm_gem_object_release(&vbo->tbo.base);
 }
@@ -497,7 +499,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
		if (!(flags & drm_vmw_synccpu_allow_cs)) {
			atomic_dec(&vmw_bo->cpu_writers);
		}
-		vmw_user_bo_unref(vmw_bo);
+		vmw_user_bo_unref(&vmw_bo);
	}
 
	return ret;
@@ -539,7 +541,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			return ret;
 
		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
-		vmw_user_bo_unref(vbo);
+		vmw_user_bo_unref(&vbo);
		if (unlikely(ret != 0)) {
			if (ret == -ERESTARTSYS || ret == -EBUSY)
				return -EBUSY;
@@ -612,7 +614,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
	}
 
	*out = to_vmw_bo(gobj);
-	ttm_bo_get(&(*out)->tbo);
 
	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -195,12 +195,19 @@ static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
	return buf;
 }
 
-static inline void vmw_user_bo_unref(struct vmw_bo *vbo)
+static inline struct vmw_bo *vmw_user_bo_ref(struct vmw_bo *vbo)
 {
-	if (vbo) {
-		ttm_bo_put(&vbo->tbo);
-		drm_gem_object_put(&vbo->tbo.base);
-	}
+	drm_gem_object_get(&vbo->tbo.base);
+	return vbo;
+}
+
+static inline void vmw_user_bo_unref(struct vmw_bo **buf)
+{
+	struct vmw_bo *tmp_buf = *buf;
+
+	*buf = NULL;
+	if (tmp_buf)
+		drm_gem_object_put(&tmp_buf->tbo.base);
 }
 
 static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
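The new vmw_user_bo_unref() takes a pointer-to-pointer so it can clear the caller's reference before dropping it. A standalone sketch of why that idiom helps (generic C with a made-up obj type, not vmwgfx code):

#include <stdio.h>
#include <stdlib.h>

struct obj { int refcount; };

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0) {
		printf("freeing object\n");
		free(o);
	}
}

static void obj_unref(struct obj **p)
{
	struct obj *tmp = *p;

	*p = NULL;        /* the caller's pointer is cleared first */
	if (tmp)
		obj_put(tmp);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->refcount = 1;
	obj_unref(&o);
	obj_unref(&o);    /* harmless: o is already NULL, no double put */
	return 0;
}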
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -432,7 +432,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
	 * for the new COTable. Initially pin the buffer object to make sure
	 * we can use tryreserve without failure.
	 */
-	ret = vmw_bo_create(dev_priv, &bo_params, &buf);
+	ret = vmw_gem_object_create(dev_priv, &bo_params, &buf);
	if (ret) {
		DRM_ERROR("Failed initializing new cotable MOB.\n");
		goto out_done;
@@ -502,7 +502,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 
	vmw_resource_mob_attach(res);
	/* Let go of the old mob. */
-	vmw_bo_unreference(&old_buf);
+	vmw_user_bo_unref(&old_buf);
	res->id = vcotbl->type;
 
	ret = dma_resv_reserve_fences(bo->base.resv, 1);
@@ -521,7 +521,7 @@ out_map_new:
 out_wait:
	ttm_bo_unpin(bo);
	ttm_bo_unreserve(bo);
-	vmw_bo_unreference(&buf);
+	vmw_user_bo_unref(&buf);
 
 out_done:
	MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -853,6 +853,10 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
 /**
  * GEM related functionality - vmwgfx_gem.c
  */
+struct vmw_bo_params;
+int vmw_gem_object_create(struct vmw_private *vmw,
+			  struct vmw_bo_params *params,
+			  struct vmw_bo **p_vbo);
 extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
					     struct drm_file *filp,
					     uint32_t size,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1151,7 +1151,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 SVGAMobId *id,
				 struct vmw_bo **vmw_bo_p)
 {
-	struct vmw_bo *vmw_bo;
+	struct vmw_bo *vmw_bo, *tmp_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;
@@ -1164,7 +1164,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
	}
	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
-	vmw_user_bo_unref(vmw_bo);
+	tmp_bo = vmw_bo;
+	vmw_user_bo_unref(&tmp_bo);
	if (unlikely(ret != 0))
		return ret;
 
@@ -1206,7 +1207,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   SVGAGuestPtr *ptr,
				   struct vmw_bo **vmw_bo_p)
 {
-	struct vmw_bo *vmw_bo;
+	struct vmw_bo *vmw_bo, *tmp_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;
@@ -1220,7 +1221,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
-	vmw_user_bo_unref(vmw_bo);
+	tmp_bo = vmw_bo;
+	vmw_user_bo_unref(&tmp_bo);
	if (unlikely(ret != 0))
		return ret;
 
@@ -1619,7 +1621,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 {
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
-		((unsigned long) header + header->size + sizeof(header));
+		((unsigned long) header + header->size + sizeof(*header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
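The vmw_cmd_tex_state() hunk is the "size calculation in texture-state code" fix from the summary: sizeof(header) measures the pointer, not the command header it points at. A standalone illustration (struct cmd_header is a made-up stand-in for SVGA3dCmdHeader):

#include <stdio.h>
#include <stdint.h>

struct cmd_header {
	uint32_t id;
	uint32_t size;
};

int main(void)
{
	struct cmd_header hdr = { .id = 0, .size = 64 };
	struct cmd_header *header = &hdr;

	/* On a 64-bit build: 8 for the pointer, 8 for the struct here,
	 * but for a larger header struct the two diverge and the
	 * computed end-of-command bound lands in the wrong place. */
	printf("sizeof(header)  = %zu (pointer)\n", sizeof(header));
	printf("sizeof(*header) = %zu (struct)\n", sizeof(*header));
	printf("command end offset = %u + %zu\n",
	       header->size, sizeof(*header));
	return 0;
}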
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -111,6 +111,20 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
	.vm_ops = &vmw_vm_ops,
 };
 
+int vmw_gem_object_create(struct vmw_private *vmw,
+			  struct vmw_bo_params *params,
+			  struct vmw_bo **p_vbo)
+{
+	int ret = vmw_bo_create(vmw, params, p_vbo);
+
+	if (ret != 0)
+		goto out_no_bo;
+
+	(*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
+out_no_bo:
+	return ret;
+}
+
 int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
				      struct drm_file *filp,
				      uint32_t size,
@@ -126,12 +140,10 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
		.pin = false
	};
 
-	ret = vmw_bo_create(dev_priv, &params, p_vbo);
+	ret = vmw_gem_object_create(dev_priv, &params, p_vbo);
	if (ret != 0)
		goto out_no_bo;
 
-	(*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
-
	ret = drm_gem_handle_create(filp, &(*p_vbo)->tbo.base, handle);
 out_no_bo:
	return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1471,8 +1471,8 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
-	vmw_bo_unreference(&res->guest_memory_bo);
-	res->guest_memory_bo = vmw_bo_reference(bo_mob);
+	vmw_user_bo_unref(&res->guest_memory_bo);
+	res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
	res->guest_memory_offset = 0;
	vmw_resource_unreserve(res, false, false, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
@@ -1666,7 +1666,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
-		vmw_user_bo_unref(bo);
+		vmw_user_bo_unref(&bo);
	if (surface)
		vmw_surface_unreference(&surface);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -451,7 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
 
	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
 
-	vmw_user_bo_unref(buf);
+	vmw_user_bo_unref(&buf);
 
 out_unlock:
	mutex_unlock(&overlay->mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -141,7 +141,7 @@ static void vmw_resource_release(struct kref *kref)
			if (res->coherent)
				vmw_bo_dirty_release(res->guest_memory_bo);
			ttm_bo_unreserve(bo);
-			vmw_bo_unreference(&res->guest_memory_bo);
+			vmw_user_bo_unref(&res->guest_memory_bo);
		}
 
	if (likely(res->hw_destroy != NULL)) {
@@ -338,7 +338,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
		return 0;
	}
 
-	ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);
+	ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
	if (unlikely(ret != 0))
		goto out_no_bo;
 
@@ -457,11 +457,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->guest_memory_bo);
-			vmw_bo_unreference(&res->guest_memory_bo);
+			vmw_user_bo_unref(&res->guest_memory_bo);
		}
 
		if (new_guest_memory_bo) {
-			res->guest_memory_bo = vmw_bo_reference(new_guest_memory_bo);
+			res->guest_memory_bo = vmw_user_bo_ref(new_guest_memory_bo);
 
			/*
			 * The validation code should already have added a
@@ -551,7 +551,7 @@ out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (guest_memory_dirty)
-		vmw_bo_unreference(&res->guest_memory_bo);
+		vmw_user_bo_unref(&res->guest_memory_bo);
 
	return ret;
 }
@@ -727,7 +727,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr,
			goto out_no_validate;
		else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
			WARN_ON_ONCE(vmw_resource_mob_attached(res));
-			vmw_bo_unreference(&res->guest_memory_bo);
+			vmw_user_bo_unref(&res->guest_memory_bo);
		}
 
	return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -180,7 +180,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
 
	res->guest_memory_size = size;
	if (byte_code) {
-		res->guest_memory_bo = vmw_bo_reference(byte_code);
+		res->guest_memory_bo = vmw_user_bo_ref(byte_code);
		res->guest_memory_offset = offset;
	}
	shader->size = size;
@@ -809,7 +809,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
				 shader_type, num_input_sig,
				 num_output_sig, tfile, shader_handle);
 out_bad_arg:
-	vmw_user_bo_unref(buffer);
+	vmw_user_bo_unref(&buffer);
	return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -686,9 +686,6 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
	    container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;
 
-	if (res->guest_memory_bo)
-		drm_gem_object_put(&res->guest_memory_bo->tbo.base);
-
	*p_base = NULL;
	vmw_resource_unreference(&res);
 }
@@ -855,23 +852,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
	 * expect a backup buffer to be present.
	 */
	if (dev_priv->has_mob && req->shareable) {
-		uint32_t backup_handle;
+		struct vmw_bo_params params = {
+			.domain = VMW_BO_DOMAIN_SYS,
+			.busy_domain = VMW_BO_DOMAIN_SYS,
+			.bo_type = ttm_bo_type_device,
+			.size = res->guest_memory_size,
+			.pin = false
+		};
 
-		ret = vmw_gem_object_create_with_handle(dev_priv,
-							file_priv,
-							res->guest_memory_size,
-							&backup_handle,
-							&res->guest_memory_bo);
+		ret = vmw_gem_object_create(dev_priv,
+					    &params,
+					    &res->guest_memory_bo);
		if (unlikely(ret != 0)) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
-		vmw_bo_reference(res->guest_memory_bo);
-		/*
-		 * We don't expose the handle to the userspace and surface
-		 * already holds a gem reference
-		 */
-		drm_gem_handle_delete(file_priv, backup_handle);
	}
 
	tmp = vmw_resource_reference(&srf->res);
@@ -1512,7 +1507,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
		if (ret == 0) {
			if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
				VMW_DEBUG_USER("Surface backup buffer too small.\n");
-				vmw_bo_unreference(&res->guest_memory_bo);
+				vmw_user_bo_unref(&res->guest_memory_bo);
				ret = -EINVAL;
				goto out_unlock;
			} else {
@@ -1526,8 +1521,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
							res->guest_memory_size,
							&backup_handle,
							&res->guest_memory_bo);
-		if (ret == 0)
-			vmw_bo_reference(res->guest_memory_bo);
	}
 
	if (unlikely(ret != 0)) {
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
@@ -568,6 +568,25 @@ static inline void dma_fence_set_error(struct dma_fence *fence,
	fence->error = error;
 }
 
+/**
+ * dma_fence_timestamp - helper to get the completion timestamp of a fence
+ * @fence: fence to get the timestamp from.
+ *
+ * After a fence is signaled the timestamp is updated with the signaling time,
+ * but setting the timestamp can race with tasks waiting for the signaling. This
+ * helper busy waits for the correct timestamp to appear.
+ */
+static inline ktime_t dma_fence_timestamp(struct dma_fence *fence)
+{
+	if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)))
+		return ktime_get();
+
+	while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+		cpu_relax();
+
+	return fence->timestamp;
+}
+
 signed long dma_fence_wait_timeout(struct dma_fence *,
				   bool intr, signed long timeout);
 signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
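All three users in this pull (dma-fence-unwrap, sync_file, drm-sched) now go through this helper instead of reading fence->timestamp directly. A hedged sketch of the caller-side pattern (kernel context; report_fence_time_ns is a hypothetical function, while the dma_fence calls are the real API):

#include <linux/dma-fence.h>

/* Hypothetical caller: only consult the timestamp once the fence is
 * known to be signaled, and let dma_fence_timestamp() wait out the
 * brief window in which the signaler has not yet stored it. */
static s64 report_fence_time_ns(struct dma_fence *fence)
{
	if (!dma_fence_is_signaled(fence))
		return 0;

	return ktime_to_ns(dma_fence_timestamp(fence));
}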