drm/msm: Drop priv->lastctx
cur_ctx_seqno already does the same thing, but handles the edge cases where a refcnt'd context can live after lastclose. So let's not have two ways to do the same thing. Signed-off-by: Rob Clark <robdclark@chromium.org> Reviewed-by: Akhil P Oommen <akhilpo@codeaurora.org> Link: https://lore.kernel.org/r/20211109181117.591148-3-robdclark@gmail.com Signed-off-by: Rob Clark <robdclark@chromium.org>
This commit is contained in:
@@ -12,7 +12,6 @@ static bool a2xx_idle(struct msm_gpu *gpu);
|
|||||||
|
|
||||||
static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
||||||
{
|
{
|
||||||
struct msm_drm_private *priv = gpu->dev->dev_private;
|
|
||||||
struct msm_ringbuffer *ring = submit->ring;
|
struct msm_ringbuffer *ring = submit->ring;
|
||||||
unsigned int i;
|
unsigned int i;
|
||||||
|
|
||||||
@@ -23,7 +22,7 @@ static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
|||||||
break;
|
break;
|
||||||
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
|
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
|
||||||
/* ignore if there has not been a ctx switch: */
|
/* ignore if there has not been a ctx switch: */
|
||||||
if (priv->lastctx == submit->queue->ctx)
|
if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
|
||||||
break;
|
break;
|
||||||
fallthrough;
|
fallthrough;
|
||||||
case MSM_SUBMIT_CMD_BUF:
|
case MSM_SUBMIT_CMD_BUF:
|
||||||
|
|||||||
@@ -30,7 +30,6 @@ static bool a3xx_idle(struct msm_gpu *gpu);
|
|||||||
|
|
||||||
static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
||||||
{
|
{
|
||||||
struct msm_drm_private *priv = gpu->dev->dev_private;
|
|
||||||
struct msm_ringbuffer *ring = submit->ring;
|
struct msm_ringbuffer *ring = submit->ring;
|
||||||
unsigned int i;
|
unsigned int i;
|
||||||
|
|
||||||
@@ -41,7 +40,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
|||||||
break;
|
break;
|
||||||
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
|
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
|
||||||
/* ignore if there has not been a ctx switch: */
|
/* ignore if there has not been a ctx switch: */
|
||||||
if (priv->lastctx == submit->queue->ctx)
|
if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
|
||||||
break;
|
break;
|
||||||
fallthrough;
|
fallthrough;
|
||||||
case MSM_SUBMIT_CMD_BUF:
|
case MSM_SUBMIT_CMD_BUF:
|
||||||
|
|||||||
@@ -24,7 +24,6 @@ static bool a4xx_idle(struct msm_gpu *gpu);
|
|||||||
|
|
||||||
static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
||||||
{
|
{
|
||||||
struct msm_drm_private *priv = gpu->dev->dev_private;
|
|
||||||
struct msm_ringbuffer *ring = submit->ring;
|
struct msm_ringbuffer *ring = submit->ring;
|
||||||
unsigned int i;
|
unsigned int i;
|
||||||
|
|
||||||
@@ -35,7 +34,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
|||||||
break;
|
break;
|
||||||
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
|
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
|
||||||
/* ignore if there has not been a ctx switch: */
|
/* ignore if there has not been a ctx switch: */
|
||||||
if (priv->lastctx == submit->queue->ctx)
|
if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
|
||||||
break;
|
break;
|
||||||
fallthrough;
|
fallthrough;
|
||||||
case MSM_SUBMIT_CMD_BUF:
|
case MSM_SUBMIT_CMD_BUF:
|
||||||
|
|||||||
@@ -65,7 +65,6 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
|
|||||||
|
|
||||||
static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
||||||
{
|
{
|
||||||
struct msm_drm_private *priv = gpu->dev->dev_private;
|
|
||||||
struct msm_ringbuffer *ring = submit->ring;
|
struct msm_ringbuffer *ring = submit->ring;
|
||||||
struct msm_gem_object *obj;
|
struct msm_gem_object *obj;
|
||||||
uint32_t *ptr, dwords;
|
uint32_t *ptr, dwords;
|
||||||
@@ -76,7 +75,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
|
|||||||
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
|
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
|
||||||
break;
|
break;
|
||||||
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
|
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
|
||||||
if (priv->lastctx == submit->queue->ctx)
|
if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
|
||||||
break;
|
break;
|
||||||
fallthrough;
|
fallthrough;
|
||||||
case MSM_SUBMIT_CMD_BUF:
|
case MSM_SUBMIT_CMD_BUF:
|
||||||
@@ -126,12 +125,11 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
|||||||
{
|
{
|
||||||
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
|
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
|
||||||
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
|
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
|
||||||
struct msm_drm_private *priv = gpu->dev->dev_private;
|
|
||||||
struct msm_ringbuffer *ring = submit->ring;
|
struct msm_ringbuffer *ring = submit->ring;
|
||||||
unsigned int i, ibs = 0;
|
unsigned int i, ibs = 0;
|
||||||
|
|
||||||
if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
|
if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
|
||||||
priv->lastctx = NULL;
|
gpu->cur_ctx_seqno = 0;
|
||||||
a5xx_submit_in_rb(gpu, submit);
|
a5xx_submit_in_rb(gpu, submit);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@@ -166,7 +164,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
|||||||
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
|
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
|
||||||
break;
|
break;
|
||||||
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
|
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
|
||||||
if (priv->lastctx == submit->queue->ctx)
|
if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
|
||||||
break;
|
break;
|
||||||
fallthrough;
|
fallthrough;
|
||||||
case MSM_SUBMIT_CMD_BUF:
|
case MSM_SUBMIT_CMD_BUF:
|
||||||
|
|||||||
@@ -106,7 +106,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
|
|||||||
u32 asid;
|
u32 asid;
|
||||||
u64 memptr = rbmemptr(ring, ttbr0);
|
u64 memptr = rbmemptr(ring, ttbr0);
|
||||||
|
|
||||||
if (ctx->seqno == a6xx_gpu->cur_ctx_seqno)
|
if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
|
if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
|
||||||
@@ -138,14 +138,11 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
|
|||||||
|
|
||||||
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
|
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
|
||||||
OUT_RING(ring, 0x31);
|
OUT_RING(ring, 0x31);
|
||||||
|
|
||||||
a6xx_gpu->cur_ctx_seqno = ctx->seqno;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
||||||
{
|
{
|
||||||
unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
|
unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
|
||||||
struct msm_drm_private *priv = gpu->dev->dev_private;
|
|
||||||
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
|
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
|
||||||
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
|
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
|
||||||
struct msm_ringbuffer *ring = submit->ring;
|
struct msm_ringbuffer *ring = submit->ring;
|
||||||
@@ -177,7 +174,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
|||||||
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
|
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
|
||||||
break;
|
break;
|
||||||
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
|
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
|
||||||
if (priv->lastctx == submit->queue->ctx)
|
if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
|
||||||
break;
|
break;
|
||||||
fallthrough;
|
fallthrough;
|
||||||
case MSM_SUBMIT_CMD_BUF:
|
case MSM_SUBMIT_CMD_BUF:
|
||||||
@@ -1081,7 +1078,7 @@ static int hw_init(struct msm_gpu *gpu)
|
|||||||
/* Always come up on rb 0 */
|
/* Always come up on rb 0 */
|
||||||
a6xx_gpu->cur_ring = gpu->rb[0];
|
a6xx_gpu->cur_ring = gpu->rb[0];
|
||||||
|
|
||||||
a6xx_gpu->cur_ctx_seqno = 0;
|
gpu->cur_ctx_seqno = 0;
|
||||||
|
|
||||||
/* Enable the SQE to start the CP engine */
|
/* Enable the SQE to start the CP engine */
|
||||||
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
|
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
|
||||||
|
|||||||
@@ -20,16 +20,6 @@ struct a6xx_gpu {
|
|||||||
|
|
||||||
struct msm_ringbuffer *cur_ring;
|
struct msm_ringbuffer *cur_ring;
|
||||||
|
|
||||||
/**
|
|
||||||
* cur_ctx_seqno:
|
|
||||||
*
|
|
||||||
* The ctx->seqno value of the context with current pgtables
|
|
||||||
* installed. Tracked by seqno rather than pointer value to
|
|
||||||
* avoid dangling pointers, and cases where a ctx can be freed
|
|
||||||
* and a new one created with the same address.
|
|
||||||
*/
|
|
||||||
int cur_ctx_seqno;
|
|
||||||
|
|
||||||
struct a6xx_gmu gmu;
|
struct a6xx_gmu gmu;
|
||||||
|
|
||||||
struct drm_gem_object *shadow_bo;
|
struct drm_gem_object *shadow_bo;
|
||||||
|
|||||||
@@ -752,14 +752,8 @@ static void context_close(struct msm_file_private *ctx)
|
|||||||
|
|
||||||
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
|
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
|
||||||
{
|
{
|
||||||
struct msm_drm_private *priv = dev->dev_private;
|
|
||||||
struct msm_file_private *ctx = file->driver_priv;
|
struct msm_file_private *ctx = file->driver_priv;
|
||||||
|
|
||||||
mutex_lock(&dev->struct_mutex);
|
|
||||||
if (ctx == priv->lastctx)
|
|
||||||
priv->lastctx = NULL;
|
|
||||||
mutex_unlock(&dev->struct_mutex);
|
|
||||||
|
|
||||||
context_close(ctx);
|
context_close(ctx);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -158,7 +158,7 @@ struct msm_drm_private {
|
|||||||
|
|
||||||
/* when we have more than one 'msm_gpu' these need to be an array: */
|
/* when we have more than one 'msm_gpu' these need to be an array: */
|
||||||
struct msm_gpu *gpu;
|
struct msm_gpu *gpu;
|
||||||
struct msm_file_private *lastctx;
|
|
||||||
/* gpu is only set on open(), but we need this info earlier */
|
/* gpu is only set on open(), but we need this info earlier */
|
||||||
bool is_a2xx;
|
bool is_a2xx;
|
||||||
bool has_cached_coherent;
|
bool has_cached_coherent;
|
||||||
|
|||||||
@@ -763,7 +763,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
|||||||
mutex_unlock(&gpu->active_lock);
|
mutex_unlock(&gpu->active_lock);
|
||||||
|
|
||||||
gpu->funcs->submit(gpu, submit);
|
gpu->funcs->submit(gpu, submit);
|
||||||
priv->lastctx = submit->queue->ctx;
|
gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
|
||||||
|
|
||||||
hangcheck_timer_reset(gpu);
|
hangcheck_timer_reset(gpu);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -144,6 +144,17 @@ struct msm_gpu {
|
|||||||
struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
|
struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
|
||||||
int nr_rings;
|
int nr_rings;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* cur_ctx_seqno:
|
||||||
|
*
|
||||||
|
* The ctx->seqno value of the last context to submit rendering,
|
||||||
|
* and the one with current pgtables installed (for generations
|
||||||
|
* that support per-context pgtables). Tracked by seqno rather
|
||||||
|
* than pointer value to avoid dangling pointers, and cases where
|
||||||
|
* a ctx can be freed and a new one created with the same address.
|
||||||
|
*/
|
||||||
|
int cur_ctx_seqno;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* List of GEM active objects on this gpu. Protected by
|
* List of GEM active objects on this gpu. Protected by
|
||||||
* msm_drm_private::mm_lock
|
* msm_drm_private::mm_lock
|
||||||
|
|||||||
Reference in New Issue
Block a user