drm/msm: Remove struct_mutex usage
The remaining struct_mutex usage is just to serialize various gpu related things (submit/retire/recover/fault/etc), so replace struct_mutex with gpu->lock.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Link: https://lore.kernel.org/r/20211109181117.591148-4-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
This commit is contained in:
@@ -107,7 +107,7 @@ reset_set(void *data, u64 val)
|
|||||||
* try to reset an active GPU.
|
* try to reset an active GPU.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
mutex_lock(&dev->struct_mutex);
|
mutex_lock(&gpu->lock);
|
||||||
|
|
||||||
release_firmware(adreno_gpu->fw[ADRENO_FW_PM4]);
|
release_firmware(adreno_gpu->fw[ADRENO_FW_PM4]);
|
||||||
adreno_gpu->fw[ADRENO_FW_PM4] = NULL;
|
adreno_gpu->fw[ADRENO_FW_PM4] = NULL;
|
||||||
@@ -133,7 +133,7 @@ reset_set(void *data, u64 val)
|
|||||||
gpu->funcs->recover(gpu);
|
gpu->funcs->recover(gpu);
|
||||||
|
|
||||||
pm_runtime_put_sync(&gpu->pdev->dev);
|
pm_runtime_put_sync(&gpu->pdev->dev);
|
||||||
mutex_unlock(&dev->struct_mutex);
|
mutex_unlock(&gpu->lock);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -408,9 +408,9 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
|
|||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
mutex_lock(&dev->struct_mutex);
|
mutex_lock(&gpu->lock);
|
||||||
ret = msm_gpu_hw_init(gpu);
|
ret = msm_gpu_hw_init(gpu);
|
||||||
mutex_unlock(&dev->struct_mutex);
|
mutex_unlock(&gpu->lock);
|
||||||
pm_runtime_put_autosuspend(&pdev->dev);
|
pm_runtime_put_autosuspend(&pdev->dev);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
|
DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
|
||||||
|
|||||||
@@ -29,14 +29,14 @@ static int msm_gpu_show(struct seq_file *m, void *arg)
|
|||||||
struct msm_gpu *gpu = priv->gpu;
|
struct msm_gpu *gpu = priv->gpu;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
|
ret = mutex_lock_interruptible(&gpu->lock);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
drm_printf(&p, "%s Status:\n", gpu->name);
|
drm_printf(&p, "%s Status:\n", gpu->name);
|
||||||
gpu->funcs->show(gpu, show_priv->state, &p);
|
gpu->funcs->show(gpu, show_priv->state, &p);
|
||||||
|
|
||||||
mutex_unlock(&show_priv->dev->struct_mutex);
|
mutex_unlock(&gpu->lock);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@@ -48,9 +48,9 @@ static int msm_gpu_release(struct inode *inode, struct file *file)
|
|||||||
struct msm_drm_private *priv = show_priv->dev->dev_private;
|
struct msm_drm_private *priv = show_priv->dev->dev_private;
|
||||||
struct msm_gpu *gpu = priv->gpu;
|
struct msm_gpu *gpu = priv->gpu;
|
||||||
|
|
||||||
mutex_lock(&show_priv->dev->struct_mutex);
|
mutex_lock(&gpu->lock);
|
||||||
gpu->funcs->gpu_state_put(show_priv->state);
|
gpu->funcs->gpu_state_put(show_priv->state);
|
||||||
mutex_unlock(&show_priv->dev->struct_mutex);
|
mutex_unlock(&gpu->lock);
|
||||||
|
|
||||||
kfree(show_priv);
|
kfree(show_priv);
|
||||||
|
|
||||||
@@ -72,7 +72,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
|
|||||||
if (!show_priv)
|
if (!show_priv)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
ret = mutex_lock_interruptible(&dev->struct_mutex);
|
ret = mutex_lock_interruptible(&gpu->lock);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto free_priv;
|
goto free_priv;
|
||||||
|
|
||||||
@@ -81,7 +81,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
|
|||||||
show_priv->state = gpu->funcs->gpu_state_get(gpu);
|
show_priv->state = gpu->funcs->gpu_state_get(gpu);
|
||||||
pm_runtime_put_sync(&gpu->pdev->dev);
|
pm_runtime_put_sync(&gpu->pdev->dev);
|
||||||
|
|
||||||
mutex_unlock(&dev->struct_mutex);
|
mutex_unlock(&gpu->lock);
|
||||||
|
|
||||||
if (IS_ERR(show_priv->state)) {
|
if (IS_ERR(show_priv->state)) {
|
||||||
ret = PTR_ERR(show_priv->state);
|
ret = PTR_ERR(show_priv->state);
|
||||||
|
|||||||
@@ -150,7 +150,7 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
|
|||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));
|
WARN_ON(!mutex_is_locked(&gpu->lock));
|
||||||
|
|
||||||
if (!gpu->needs_hw_init)
|
if (!gpu->needs_hw_init)
|
||||||
return 0;
|
return 0;
|
||||||
@@ -361,7 +361,7 @@ static void recover_worker(struct kthread_work *work)
|
|||||||
char *comm = NULL, *cmd = NULL;
|
char *comm = NULL, *cmd = NULL;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
mutex_lock(&dev->struct_mutex);
|
mutex_lock(&gpu->lock);
|
||||||
|
|
||||||
DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
|
DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
|
||||||
|
|
||||||
@@ -442,7 +442,7 @@ static void recover_worker(struct kthread_work *work)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
mutex_unlock(&dev->struct_mutex);
|
mutex_unlock(&gpu->lock);
|
||||||
|
|
||||||
msm_gpu_retire(gpu);
|
msm_gpu_retire(gpu);
|
||||||
}
|
}
|
||||||
@@ -450,12 +450,11 @@ static void recover_worker(struct kthread_work *work)
|
|||||||
static void fault_worker(struct kthread_work *work)
|
static void fault_worker(struct kthread_work *work)
|
||||||
{
|
{
|
||||||
struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
|
struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
|
||||||
struct drm_device *dev = gpu->dev;
|
|
||||||
struct msm_gem_submit *submit;
|
struct msm_gem_submit *submit;
|
||||||
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
|
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
|
||||||
char *comm = NULL, *cmd = NULL;
|
char *comm = NULL, *cmd = NULL;
|
||||||
|
|
||||||
mutex_lock(&dev->struct_mutex);
|
mutex_lock(&gpu->lock);
|
||||||
|
|
||||||
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
|
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
|
||||||
if (submit && submit->fault_dumped)
|
if (submit && submit->fault_dumped)
|
||||||
@@ -490,7 +489,7 @@ resume_smmu:
|
|||||||
memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
|
memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
|
||||||
gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
|
gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
|
||||||
|
|
||||||
mutex_unlock(&dev->struct_mutex);
|
mutex_unlock(&gpu->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void hangcheck_timer_reset(struct msm_gpu *gpu)
|
static void hangcheck_timer_reset(struct msm_gpu *gpu)
|
||||||
@@ -733,7 +732,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
|||||||
struct msm_ringbuffer *ring = submit->ring;
|
struct msm_ringbuffer *ring = submit->ring;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
|
WARN_ON(!mutex_is_locked(&gpu->lock));
|
||||||
|
|
||||||
pm_runtime_get_sync(&gpu->pdev->dev);
|
pm_runtime_get_sync(&gpu->pdev->dev);
|
||||||
|
|
||||||
@@ -848,6 +847,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
|
|||||||
|
|
||||||
INIT_LIST_HEAD(&gpu->active_list);
|
INIT_LIST_HEAD(&gpu->active_list);
|
||||||
mutex_init(&gpu->active_lock);
|
mutex_init(&gpu->active_lock);
|
||||||
|
mutex_init(&gpu->lock);
|
||||||
kthread_init_work(&gpu->retire_work, retire_worker);
|
kthread_init_work(&gpu->retire_work, retire_worker);
|
||||||
kthread_init_work(&gpu->recover_work, recover_worker);
|
kthread_init_work(&gpu->recover_work, recover_worker);
|
||||||
kthread_init_work(&gpu->fault_work, fault_worker);
|
kthread_init_work(&gpu->fault_work, fault_worker);
|
||||||
|
|||||||
@@ -161,13 +161,23 @@ struct msm_gpu {
|
|||||||
*/
|
*/
|
||||||
struct list_head active_list;
|
struct list_head active_list;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* lock:
|
||||||
|
*
|
||||||
|
* General lock for serializing all the gpu things.
|
||||||
|
*
|
||||||
|
* TODO move to per-ring locking where feasible (ie. submit/retire
|
||||||
|
* path, etc)
|
||||||
|
*/
|
||||||
|
struct mutex lock;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* active_submits:
|
* active_submits:
|
||||||
*
|
*
|
||||||
* The number of submitted but not yet retired submits, used to
|
* The number of submitted but not yet retired submits, used to
|
||||||
* determine transitions between active and idle.
|
* determine transitions between active and idle.
|
||||||
*
|
*
|
||||||
* Protected by lock
|
* Protected by active_lock
|
||||||
*/
|
*/
|
||||||
int active_submits;
|
int active_submits;
|
||||||
|
|
||||||
@@ -548,28 +558,28 @@ static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
|
|||||||
{
|
{
|
||||||
struct msm_gpu_state *state = NULL;
|
struct msm_gpu_state *state = NULL;
|
||||||
|
|
||||||
mutex_lock(&gpu->dev->struct_mutex);
|
mutex_lock(&gpu->lock);
|
||||||
|
|
||||||
if (gpu->crashstate) {
|
if (gpu->crashstate) {
|
||||||
kref_get(&gpu->crashstate->ref);
|
kref_get(&gpu->crashstate->ref);
|
||||||
state = gpu->crashstate;
|
state = gpu->crashstate;
|
||||||
}
|
}
|
||||||
|
|
||||||
mutex_unlock(&gpu->dev->struct_mutex);
|
mutex_unlock(&gpu->lock);
|
||||||
|
|
||||||
return state;
|
return state;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
|
static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
|
||||||
{
|
{
|
||||||
mutex_lock(&gpu->dev->struct_mutex);
|
mutex_lock(&gpu->lock);
|
||||||
|
|
||||||
if (gpu->crashstate) {
|
if (gpu->crashstate) {
|
||||||
if (gpu->funcs->gpu_state_put(gpu->crashstate))
|
if (gpu->funcs->gpu_state_put(gpu->crashstate))
|
||||||
gpu->crashstate = NULL;
|
gpu->crashstate = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
mutex_unlock(&gpu->dev->struct_mutex);
|
mutex_unlock(&gpu->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|||||||
@@ -155,9 +155,12 @@ static int perf_open(struct inode *inode, struct file *file)
|
|||||||
struct msm_gpu *gpu = priv->gpu;
|
struct msm_gpu *gpu = priv->gpu;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
mutex_lock(&dev->struct_mutex);
|
if (!gpu)
|
||||||
|
return -ENODEV;
|
||||||
|
|
||||||
if (perf->open || !gpu) {
|
mutex_lock(&gpu->lock);
|
||||||
|
|
||||||
|
if (perf->open) {
|
||||||
ret = -EBUSY;
|
ret = -EBUSY;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
@@ -171,7 +174,7 @@ static int perf_open(struct inode *inode, struct file *file)
|
|||||||
perf->next_jiffies = jiffies + SAMPLE_TIME;
|
perf->next_jiffies = jiffies + SAMPLE_TIME;
|
||||||
|
|
||||||
out:
|
out:
|
||||||
mutex_unlock(&dev->struct_mutex);
|
mutex_unlock(&gpu->lock);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -86,7 +86,7 @@ struct msm_rd_state {
|
|||||||
struct msm_gem_submit *submit;
|
struct msm_gem_submit *submit;
|
||||||
|
|
||||||
/* fifo access is synchronized on the producer side by
|
/* fifo access is synchronized on the producer side by
|
||||||
* struct_mutex held by submit code (otherwise we could
|
* gpu->lock held by submit code (otherwise we could
|
||||||
* end up w/ cmds logged in different order than they
|
* end up w/ cmds logged in different order than they
|
||||||
* were executed). And read_lock synchronizes the reads
|
* were executed). And read_lock synchronizes the reads
|
||||||
*/
|
*/
|
||||||
@@ -181,9 +181,12 @@ static int rd_open(struct inode *inode, struct file *file)
|
|||||||
uint32_t gpu_id;
|
uint32_t gpu_id;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
mutex_lock(&dev->struct_mutex);
|
if (!gpu)
|
||||||
|
return -ENODEV;
|
||||||
|
|
||||||
if (rd->open || !gpu) {
|
mutex_lock(&gpu->lock);
|
||||||
|
|
||||||
|
if (rd->open) {
|
||||||
ret = -EBUSY;
|
ret = -EBUSY;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
@@ -200,7 +203,7 @@ static int rd_open(struct inode *inode, struct file *file)
|
|||||||
rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id));
|
rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id));
|
||||||
|
|
||||||
out:
|
out:
|
||||||
mutex_unlock(&dev->struct_mutex);
|
mutex_unlock(&gpu->lock);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -340,11 +343,10 @@ out_unlock:
|
|||||||
msm_gem_unlock(&obj->base);
|
msm_gem_unlock(&obj->base);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* called under struct_mutex */
|
/* called under gpu->lock */
|
||||||
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
|
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
|
||||||
const char *fmt, ...)
|
const char *fmt, ...)
|
||||||
{
|
{
|
||||||
struct drm_device *dev = submit->dev;
|
|
||||||
struct task_struct *task;
|
struct task_struct *task;
|
||||||
char msg[256];
|
char msg[256];
|
||||||
int i, n;
|
int i, n;
|
||||||
@@ -355,7 +357,7 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
|
|||||||
/* writing into fifo is serialized by caller, and
|
/* writing into fifo is serialized by caller, and
|
||||||
* rd->read_lock is used to serialize the reads
|
* rd->read_lock is used to serialize the reads
|
||||||
*/
|
*/
|
||||||
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
|
WARN_ON(!mutex_is_locked(&submit->gpu->lock));
|
||||||
|
|
||||||
if (fmt) {
|
if (fmt) {
|
||||||
va_list args;
|
va_list args;
|
||||||
|
|||||||
@@ -21,11 +21,11 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
|
|||||||
pm_runtime_get_sync(&gpu->pdev->dev);
|
pm_runtime_get_sync(&gpu->pdev->dev);
|
||||||
|
|
||||||
/* TODO move submit path over to using a per-ring lock.. */
|
/* TODO move submit path over to using a per-ring lock.. */
|
||||||
mutex_lock(&gpu->dev->struct_mutex);
|
mutex_lock(&gpu->lock);
|
||||||
|
|
||||||
msm_gpu_submit(gpu, submit);
|
msm_gpu_submit(gpu, submit);
|
||||||
|
|
||||||
mutex_unlock(&gpu->dev->struct_mutex);
|
mutex_unlock(&gpu->lock);
|
||||||
|
|
||||||
pm_runtime_put(&gpu->pdev->dev);
|
pm_runtime_put(&gpu->pdev->dev);
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user