From 987bf116445db5d63a5c2ed94c4479687d9c9973 Mon Sep 17 00:00:00 2001
From: Shirish S
Date: Mon, 24 Sep 2018 19:01:47 +0530
Subject: [PATCH 1/2] drm/amd/display: Signal hw_done() after waiting for
 flip_done()

In amdgpu_dm_commit_tail(), wait until flip_done() is signaled before
we signal hw_done().

[Why]
This is to temporarily address a paging error that occurs when a
nonblocking commit contends with another commit, particularly in a
mirrored display configuration where at least 2 CRTCs are updated.

The error occurs in drm_atomic_helper_wait_for_flip_done(), when we
attempt to access the contents of new_crtc_state->commit.

Here's the sequence for a mirrored 2 display setup (irrelevant steps
left out for clarity):

**THREAD 1**                         | **THREAD 2**
                                     |
Initialize atomic state for flip     |
                                     |
Queue worker                         |
                                     | ...
                                     | Do work for flip
                                     |
                                     | Signal hw_done() on CRTC 1
                                     | Signal hw_done() on CRTC 2
                                     |
                                     | Wait for flip_done() on CRTC 1 <----
**PREEMPTED BY THREAD 1**
Initialize atomic state for cursor   |
update (1)                           |
                                     |
Do cursor update work on both CRTCs  |
                                     |
Clear atomic state (2)               |
**DONE**                             | ...
                                     |
                                     | Wait for flip_done() on CRTC 2
                                     | *ERROR*

The issue starts with (1). When the atomic state is initialized, the
current CRTC states are duplicated to be the new_crtc_states, and
referenced to be the old_crtc_states. (The new_crtc_states are to be
filled with update data.)

Some things to note:

* Due to the mirrored configuration, the cursor updates on both CRTCs.

* At this point, the pflip IRQ has already been handled, and flip_done
  signaled on all CRTCs. The cursor commit can therefore continue.

* The old_crtc_states used by the cursor update are the **same states**
  as the new_crtc_states used by the flip worker.

At (2), the old_crtc_state is freed (*), and the cursor commit
completes. We then context switch back to the flip worker, where we
attempt to access the new_crtc_state->commit object. This is
problematic, as this state has already been freed.

(*) Technically, 'state->crtcs[i].state' is freed, which was made to
    reference old_crtc_state in drm_atomic_helper_swap_state()

[How]
By moving hw_done() after wait_for_flip_done(), we're guaranteed that
the new_crtc_state (from the flip worker's perspective) still exists.
This is because any other commit will be blocked, waiting for the
hw_done() signal.

Note that both the i915 and imx drivers have this sequence flipped
already, masking this problem.

Signed-off-by: Shirish S
Signed-off-by: Leo Li
Reviewed-by: Harry Wentland
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 96875950845a..6903fe6c894b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4633,12 +4633,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	}
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
-	/* Signal HW programming completion */
-	drm_atomic_helper_commit_hw_done(state);
 
 	if (wait_for_vblank)
 		drm_atomic_helper_wait_for_flip_done(dev, state);
 
+	/*
+	 * FIXME:
+	 * Delay hw_done() until flip_done() is signaled. This is to block
+	 * another commit from freeing the CRTC state while we're still
+	 * waiting on flip_done.
+	 */
+	drm_atomic_helper_commit_hw_done(state);
+
 	drm_atomic_helper_cleanup_planes(dev, state);
 
 	/* Finally, drop a runtime PM reference for each newly disabled CRTC,
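
For context, the ordering this patch establishes can be summarized in a
minimal commit_tail sketch. This is illustrative only, not the amdgpu_dm
implementation: the drm_atomic_helper_* calls are real DRM helpers, but the
surrounding function and its structure are an assumption for the example.

#include <drm/drm_atomic_helper.h>

/* Illustrative commit_tail only, not the amdgpu_dm code. */
static void example_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* Block until the pflip interrupt has completed every CRTC's flip. */
	drm_atomic_helper_wait_for_flip_done(dev, state);

	/*
	 * Only now unblock the next commit: until hw_done() is signaled it
	 * cannot swap and free the CRTC states this worker is still reading.
	 */
	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_cleanup_planes(dev, state);
}

Signaling hw_done() last means a competing commit stays blocked in its
swap/stall phase, so the states this worker still reads cannot be swapped
out and freed underneath it.
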
From 11b29c9e25788d0afb2ddb67bcd89424bd25f2f7 Mon Sep 17 00:00:00 2001
From: Felix Kuehling
Date: Tue, 2 Oct 2018 18:41:12 -0400
Subject: [PATCH 2/2] drm/amdkfd: Fix incorrect use of process->mm
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This mm_struct pointer should never be dereferenced. If running in a
user thread, just use current->mm. If running in a kernel worker use
get_task_mm to get a safe reference to the mm_struct.

Reviewed-by: Oded Gabbay
Acked-by: Christian König
Signed-off-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 .../drm/amd/amdkfd/kfd_device_queue_manager.c | 37 +++++++++++++++----
 1 file changed, 29 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index ec0d62a16e53..4f22e745df51 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -358,8 +358,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 					struct queue *q,
 					struct qcm_process_device *qpd)
 {
-	int retval;
 	struct mqd_manager *mqd_mgr;
+	int retval;
 
 	mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
 	if (!mqd_mgr)
@@ -387,8 +387,12 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 	if (!q->properties.is_active)
 		return 0;
 
-	retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-			&q->properties, q->process->mm);
+	if (WARN(q->process->mm != current->mm,
+		 "should only run in user thread"))
+		retval = -EFAULT;
+	else
+		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+					   &q->properties, current->mm);
 	if (retval)
 		goto out_uninit_mqd;
 
@@ -545,9 +549,15 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 		retval = map_queues_cpsch(dqm);
 	else if (q->properties.is_active &&
 		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
-		  q->properties.type == KFD_QUEUE_TYPE_SDMA))
-		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-				&q->properties, q->process->mm);
+		  q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+		if (WARN(q->process->mm != current->mm,
+			 "should only run in user thread"))
+			retval = -EFAULT;
+		else
+			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
+						   q->pipe, q->queue,
+						   &q->properties, current->mm);
+	}
 
 out_unlock:
 	dqm_unlock(dqm);
@@ -653,6 +663,7 @@ out:
 static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 					  struct qcm_process_device *qpd)
 {
+	struct mm_struct *mm = NULL;
 	struct queue *q;
 	struct mqd_manager *mqd_mgr;
 	struct kfd_process_device *pdd;
@@ -686,6 +697,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 		kfd_flush_tlb(pdd);
 	}
 
+	/* Take a safe reference to the mm_struct, which may otherwise
+	 * disappear even while the kfd_process is still referenced.
+	 */
+	mm = get_task_mm(pdd->process->lead_thread);
+	if (!mm) {
+		retval = -EFAULT;
+		goto out;
+	}
+
 	/* activate all active queues on the qpd */
 	list_for_each_entry(q, &qpd->queues_list, list) {
 		if (!q->properties.is_evicted)
@@ -700,14 +720,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 		q->properties.is_evicted = false;
 		q->properties.is_active = true;
 		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
-				q->queue, &q->properties,
-				q->process->mm);
+				q->queue, &q->properties, mm);
 		if (retval)
 			goto out;
 		dqm->queue_count++;
 	}
 	qpd->evicted = 0;
 out:
+	if (mm)
+		mmput(mm);
 	dqm_unlock(dqm);
 	return retval;
 }
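
The mm handling pattern this patch adopts generalizes beyond KFD: a kernel
worker must not trust a cached task->mm pointer, it has to pin the address
space with get_task_mm() and release it with mmput(). Below is a minimal
sketch of that pattern; restore_queues_example() and load_one_queue() are
hypothetical stand-ins for restore_process_queues_nocpsch() and
mqd_mgr->load_mqd(), not real kfd functions.

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>

/* Hypothetical stand-in for the real per-queue restore work. */
static int load_one_queue(struct mm_struct *mm)
{
	return mm ? 0 : -EINVAL;
}

/* Illustrative only, not the kfd implementation. */
static int restore_queues_example(struct task_struct *lead_thread)
{
	struct mm_struct *mm;
	int ret;

	/*
	 * Pin the mm: a raw task->mm pointer may already have been released
	 * during process exit, even while other references to the process
	 * object are still held.
	 */
	mm = get_task_mm(lead_thread);
	if (!mm)
		return -EFAULT;	/* the process is exiting */

	ret = load_one_queue(mm);

	mmput(mm);		/* drop the reference taken above */
	return ret;
}

In a user thread, current->mm can be used directly, which is what the
WARN() paths added to create_compute_queue_nocpsch() and update_queue()
fall back to.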