Mirror of https://github.com/torvalds/linux.git, synced 2024-11-24 13:11:40 +00:00
drm/vmwgfx: Use new validation interface for the modesetting code v2
Strip the old KMS helpers and use the new validation interface also in the modesetting code.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com> #v1
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
This commit is contained in:
parent 9c079b8ce8
commit 2724b2d54c
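The change below converts the KMS call sites from the old prepare/revert/finish buffer and resource helpers to the generic validation API. As an illustration only (not part of the patch; the function name example_bo_dirty and the elided dirty-processing step are hypothetical), a buffer-object dirty path now follows roughly this shape, mirroring vmw_kms_sou_do_bo_dirty(), vmw_kms_sou_readback() and vmw_kms_stdu_dma() in the hunks below:

/*
 * Illustrative sketch only -- not part of the patch. The function name
 * and the elided dirty-processing step are hypothetical; the calls used
 * here are the ones the patch introduces at the real call sites.
 */
static int example_bo_dirty(struct vmw_private *dev_priv,
                            struct vmw_buffer_object *buf,
                            bool interruptible,
                            struct vmw_fence_obj **out_fence)
{
        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        int ret;

        /* Gather the buffer object on the validation list. */
        ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
        if (ret)
                return ret;

        /* Reserve and validate everything on the list. */
        ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
        if (ret)
                goto out_unref;

        /* ... emit the KMS commands / process dirty clips here ... */

        /* Fence, unreserve and release the validation list. */
        vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx,
                                         out_fence, NULL);
        return 0;

out_unref:
        vmw_validation_unref_lists(&val_ctx);
        return ret;
}

Error unwinding is split in two in the new scheme: vmw_validation_revert() undoes a successful vmw_validation_prepare(), while vmw_validation_unref_lists() only drops the references gathered by vmw_validation_add_bo()/vmw_validation_add_resource().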
@@ -2557,88 +2557,31 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
}

/**
 * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
 * command submission.
 *
 * @dev_priv. Pointer to a device private structure.
 * @buf: The buffer object
 * @interruptible: Whether to perform waits as interruptible.
 * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
 * The buffer will be validated as a GMR. Already pinned buffers will not be
 * validated.
 *
 * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
 * interrupted by a signal.
 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
 * cleanup and fencing
 * @dev_priv: Pointer to the device-private struct
 * @file_priv: Pointer identifying the client when user-space fencing is used
 * @ctx: Pointer to the validation context
 * @out_fence: If non-NULL, returned refcounted fence-pointer
 * @user_fence_rep: If non-NULL, pointer to user-space address area
 * in which to copy user-space fence info
 */
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
                                  struct vmw_buffer_object *buf,
                                  bool interruptible,
                                  bool validate_as_mob,
                                  bool for_cpu_blit)
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
                                      struct drm_file *file_priv,
                                      struct vmw_validation_context *ctx,
                                      struct vmw_fence_obj **out_fence,
                                      struct drm_vmw_fence_rep __user *
                                      user_fence_rep)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = interruptible,
                .no_wait_gpu = false};
        struct ttm_buffer_object *bo = &buf->base;
        int ret;

        ttm_bo_reserve(bo, false, false, NULL);
        if (for_cpu_blit)
                ret = ttm_bo_validate(bo, &vmw_nonfixed_placement, &ctx);
        else
                ret = vmw_validation_bo_validate_single(bo, interruptible,
                                                        validate_as_mob);
        if (ret)
                ttm_bo_unreserve(bo);

        return ret;
}

/**
 * vmw_kms_helper_buffer_revert - Undo the actions of
 * vmw_kms_helper_buffer_prepare.
 *
 * @res: Pointer to the buffer object.
 *
 * Helper to be used if an error forces the caller to undo the actions of
 * vmw_kms_helper_buffer_prepare.
 */
void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf)
{
        if (buf)
                ttm_bo_unreserve(&buf->base);
}

/**
 * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
 * kms command submission.
 *
 * @dev_priv: Pointer to a device private structure.
 * @file_priv: Pointer to a struct drm_file representing the caller's
 * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
 * if non-NULL, @user_fence_rep must be non-NULL.
 * @buf: The buffer object.
 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
 * ref-counted fence pointer is returned here.
 * @user_fence_rep: Optional pointer to a user-space provided struct
 * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
 * function copies fence data to user-space in a fail-safe manner.
 */
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
                                  struct drm_file *file_priv,
                                  struct vmw_buffer_object *buf,
                                  struct vmw_fence_obj **out_fence,
                                  struct drm_vmw_fence_rep __user *
                                  user_fence_rep)
{
        struct vmw_fence_obj *fence;
        struct vmw_fence_obj *fence = NULL;
        uint32_t handle;
        int ret;

        ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
                                         file_priv ? &handle : NULL);
        if (buf)
                vmw_bo_fence_single(&buf->base, fence);
        if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
            out_fence)
                ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
                                                 file_priv ? &handle : NULL);
        vmw_validation_done(ctx, fence);
        if (file_priv)
                vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
                                            ret, user_fence_rep, fence,
@@ -2647,106 +2590,6 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
                *out_fence = fence;
        else
                vmw_fence_obj_unreference(&fence);

        vmw_kms_helper_buffer_revert(buf);
}


/**
 * vmw_kms_helper_resource_revert - Undo the actions of
 * vmw_kms_helper_resource_prepare.
 *
 * @res: Pointer to the resource. Typically a surface.
 *
 * Helper to be used if an error forces the caller to undo the actions of
 * vmw_kms_helper_resource_prepare.
 */
void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
{
        struct vmw_resource *res = ctx->res;

        vmw_kms_helper_buffer_revert(ctx->buf);
        vmw_bo_unreference(&ctx->buf);
        vmw_resource_unreserve(res, false, NULL, 0);
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}

/**
 * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
 * command submission.
 *
 * @res: Pointer to the resource. Typically a surface.
 * @interruptible: Whether to perform waits as interruptible.
 *
 * Reserves and validates also the backup buffer if a guest-backed resource.
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted by a signal.
 */
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
                                    bool interruptible,
                                    struct vmw_validation_ctx *ctx)
{
        int ret = 0;

        ctx->buf = NULL;
        ctx->res = res;

        if (interruptible)
                ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
        else
                mutex_lock(&res->dev_priv->cmdbuf_mutex);

        if (unlikely(ret != 0))
                return -ERESTARTSYS;

        ret = vmw_resource_reserve(res, interruptible, false);
        if (ret)
                goto out_unlock;

        if (res->backup) {
                ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
                                                    interruptible,
                                                    res->dev_priv->has_mob,
                                                    false);
                if (ret)
                        goto out_unreserve;

                ctx->buf = vmw_bo_reference(res->backup);
        }
        ret = vmw_resource_validate(res, interruptible);
        if (ret)
                goto out_revert;
        return 0;

out_revert:
        vmw_kms_helper_buffer_revert(ctx->buf);
out_unreserve:
        vmw_resource_unreserve(res, false, NULL, 0);
out_unlock:
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
        return ret;
}

/**
 * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
 * kms command submission.
 *
 * @res: Pointer to the resource. Typically a surface.
 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
 * ref-counted fence pointer is returned here.
 */
void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
                                    struct vmw_fence_obj **out_fence)
{
        struct vmw_resource *res = ctx->res;

        if (ctx->buf || out_fence)
                vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
                                             out_fence, NULL);

        vmw_bo_unreference(&ctx->buf);
        vmw_resource_unreserve(res, false, NULL, 0);
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}

/**
@@ -308,24 +308,12 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
                         int increment,
                         struct vmw_kms_dirty *dirty);

int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
                                  struct vmw_buffer_object *buf,
                                  bool interruptible,
                                  bool validate_as_mob,
                                  bool for_cpu_blit);
void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf);
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
                                  struct drm_file *file_priv,
                                  struct vmw_buffer_object *buf,
                                  struct vmw_fence_obj **out_fence,
                                  struct drm_vmw_fence_rep __user *
                                  user_fence_rep);
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
                                    bool interruptible,
                                    struct vmw_validation_ctx *ctx);
void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
                                    struct vmw_fence_obj **out_fence);
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
                                      struct drm_file *file_priv,
                                      struct vmw_validation_context *ctx,
                                      struct vmw_fence_obj **out_fence,
                                      struct drm_vmw_fence_rep __user *
                                      user_fence_rep);
int vmw_kms_readback(struct vmw_private *dev_priv,
                     struct drm_file *file_priv,
                     struct vmw_framebuffer *vfb,
@@ -946,16 +946,20 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
        struct vmw_framebuffer_surface *vfbs =
                container_of(framebuffer, typeof(*vfbs), base);
        struct vmw_kms_sou_surface_dirty sdirty;
        struct vmw_validation_ctx ctx;
        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        int ret;

        if (!srf)
                srf = &vfbs->surface->res;

        ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
        ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
        if (ret)
                return ret;

        ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
        if (ret)
                goto out_unref;

        sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
        sdirty.base.clip = vmw_sou_surface_clip;
        sdirty.base.dev_priv = dev_priv;
@@ -972,9 +976,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
        ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                                   dest_x, dest_y, num_clips, inc,
                                   &sdirty.base);
        vmw_kms_helper_resource_finish(&ctx, out_fence);
        vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
                                         NULL);

        return ret;

out_unref:
        vmw_validation_unref_lists(&val_ctx);
        return ret;
}

/**
@@ -1051,13 +1060,17 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
                container_of(framebuffer, struct vmw_framebuffer_bo,
                             base)->buffer;
        struct vmw_kms_dirty dirty;
        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        int ret;

        ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
                                            false, false);
        ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
        if (ret)
                return ret;

        ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
        if (ret)
                goto out_unref;

        ret = do_bo_define_gmrfb(dev_priv, framebuffer);
        if (unlikely(ret != 0))
                goto out_revert;
@@ -1069,12 +1082,15 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
                      num_clips;
        ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                                   0, 0, num_clips, increment, &dirty);
        vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
        vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
                                         NULL);

        return ret;

out_revert:
        vmw_kms_helper_buffer_revert(buf);
        vmw_validation_revert(&val_ctx);
out_unref:
        vmw_validation_unref_lists(&val_ctx);

        return ret;
}
@@ -1150,13 +1166,17 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
        struct vmw_buffer_object *buf =
                container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
        struct vmw_kms_dirty dirty;
        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        int ret;

        ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false,
                                            false);
        ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
        if (ret)
                return ret;

        ret = vmw_validation_prepare(&val_ctx, NULL, true);
        if (ret)
                goto out_unref;

        ret = do_bo_define_gmrfb(dev_priv, vfb);
        if (unlikely(ret != 0))
                goto out_revert;
@@ -1168,13 +1188,15 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
                      num_clips;
        ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
                                   0, 0, num_clips, 1, &dirty);
        vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
                                     user_fence_rep);
        vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
                                         user_fence_rep);

        return ret;

out_revert:
        vmw_kms_helper_buffer_revert(buf);

        vmw_validation_revert(&val_ctx);
out_unref:
        vmw_validation_unref_lists(&val_ctx);

        return ret;
}
@@ -759,17 +759,21 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
        struct vmw_stdu_dirty ddirty;
        int ret;
        bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

        /*
         * VMs without 3D support don't have the surface DMA command and
         * we'll be using a CPU blit, and the framebuffer should be moved out
         * of VRAM.
         */
        ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
                                            false, cpu_blit);
        ret = vmw_validation_add_bo(&val_ctx, buf, false, cpu_blit);
        if (ret)
                return ret;

        ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
        if (ret)
                goto out_unref;

        ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
                          SVGA3D_READ_HOST_VRAM;
        ddirty.left = ddirty.top = S32_MAX;
@@ -796,9 +800,13 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,

        ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
                                   0, 0, num_clips, increment, &ddirty.base);
        vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
                                     user_fence_rep);

        vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
                                         user_fence_rep);
        return ret;

out_unref:
        vmw_validation_unref_lists(&val_ctx);
        return ret;
}
@@ -924,16 +932,20 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
        struct vmw_framebuffer_surface *vfbs =
                container_of(framebuffer, typeof(*vfbs), base);
        struct vmw_stdu_dirty sdirty;
        struct vmw_validation_ctx ctx;
        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        int ret;

        if (!srf)
                srf = &vfbs->surface->res;

        ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
        ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
        if (ret)
                return ret;

        ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
        if (ret)
                goto out_unref;

        if (vfbs->is_bo_proxy) {
                ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
                if (ret)
@@ -954,9 +966,14 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
                                   dest_x, dest_y, num_clips, inc,
                                   &sdirty.base);
out_finish:
        vmw_kms_helper_resource_finish(&ctx, out_fence);
        vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
                                         NULL);

        return ret;

out_unref:
        vmw_validation_unref_lists(&val_ctx);
        return ret;
}
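For the surface-backed (resource) paths the same pattern applies, except that the resource is added with vmw_validation_add_resource() and vmw_validation_prepare() also takes the command-buffer mutex. A hedged sketch, again with a hypothetical function name and elided dirty processing, mirroring vmw_kms_sou_do_surface_dirty() and vmw_kms_stdu_surface_dirty() above:

/* Illustrative sketch only -- not part of the patch. */
static int example_surface_dirty(struct vmw_private *dev_priv,
                                 struct vmw_resource *srf,
                                 struct vmw_fence_obj **out_fence)
{
        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        int ret;

        /* Gather the surface resource on the validation list. */
        ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
        if (ret)
                return ret;

        /* Reserve and validate under the command-buffer mutex. */
        ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
        if (ret)
                goto out_unref;

        /* ... emit surface dirty commands here ... */

        /* Fence, unreserve and release the validation list. */
        vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx,
                                         out_fence, NULL);
        return 0;

out_unref:
        vmw_validation_unref_lists(&val_ctx);
        return ret;
}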