Merge tag 'amd-drm-fixes-5.18-2022-04-27' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-5.18-2022-04-27:

amdgpu:
- Runtime pm fix
- DCN memory leak fix in error path
- SI DPM deadlock fix
- S0ix fix

amdkfd:
- GWS fix
- GWS support for CRIU

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220428023232.5794-1-alexander.deucher@amd.com
This commit is contained in: commit 9d9f720733
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2395,6 +2395,71 @@ static int amdgpu_pmops_restore(struct device *dev)
     return amdgpu_device_resume(drm_dev, true);
 }
 
+static int amdgpu_runtime_idle_check_display(struct device *dev)
+{
+    struct pci_dev *pdev = to_pci_dev(dev);
+    struct drm_device *drm_dev = pci_get_drvdata(pdev);
+    struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+    if (adev->mode_info.num_crtc) {
+        struct drm_connector *list_connector;
+        struct drm_connector_list_iter iter;
+        int ret = 0;
+
+        /* XXX: Return busy if any displays are connected to avoid
+         * possible display wakeups after runtime resume due to
+         * hotplug events in case any displays were connected while
+         * the GPU was in suspend. Remove this once that is fixed.
+         */
+        mutex_lock(&drm_dev->mode_config.mutex);
+        drm_connector_list_iter_begin(drm_dev, &iter);
+        drm_for_each_connector_iter(list_connector, &iter) {
+            if (list_connector->status == connector_status_connected) {
+                ret = -EBUSY;
+                break;
+            }
+        }
+        drm_connector_list_iter_end(&iter);
+        mutex_unlock(&drm_dev->mode_config.mutex);
+
+        if (ret)
+            return ret;
+
+        if (amdgpu_device_has_dc_support(adev)) {
+            struct drm_crtc *crtc;
+
+            drm_for_each_crtc(crtc, drm_dev) {
+                drm_modeset_lock(&crtc->mutex, NULL);
+                if (crtc->state->active)
+                    ret = -EBUSY;
+                drm_modeset_unlock(&crtc->mutex);
+                if (ret < 0)
+                    break;
+            }
+        } else {
+            mutex_lock(&drm_dev->mode_config.mutex);
+            drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
+
+            drm_connector_list_iter_begin(drm_dev, &iter);
+            drm_for_each_connector_iter(list_connector, &iter) {
+                if (list_connector->dpms == DRM_MODE_DPMS_ON) {
+                    ret = -EBUSY;
+                    break;
+                }
+            }
+
+            drm_connector_list_iter_end(&iter);
+
+            drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
+            mutex_unlock(&drm_dev->mode_config.mutex);
+        }
+        if (ret)
+            return ret;
+    }
+
+    return 0;
+}
+
 static int amdgpu_pmops_runtime_suspend(struct device *dev)
 {
     struct pci_dev *pdev = to_pci_dev(dev);
@@ -2407,6 +2472,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
         return -EBUSY;
     }
 
+    ret = amdgpu_runtime_idle_check_display(dev);
+    if (ret)
+        return ret;
+
     /* wait for all rings to drain before suspending */
     for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
         struct amdgpu_ring *ring = adev->rings[i];
@@ -2516,41 +2585,7 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
         return -EBUSY;
     }
 
-    if (amdgpu_device_has_dc_support(adev)) {
-        struct drm_crtc *crtc;
-
-        drm_for_each_crtc(crtc, drm_dev) {
-            drm_modeset_lock(&crtc->mutex, NULL);
-            if (crtc->state->active)
-                ret = -EBUSY;
-            drm_modeset_unlock(&crtc->mutex);
-            if (ret < 0)
-                break;
-        }
-
-    } else {
-        struct drm_connector *list_connector;
-        struct drm_connector_list_iter iter;
-
-        mutex_lock(&drm_dev->mode_config.mutex);
-        drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
-
-        drm_connector_list_iter_begin(drm_dev, &iter);
-        drm_for_each_connector_iter(list_connector, &iter) {
-            if (list_connector->dpms == DRM_MODE_DPMS_ON) {
-                ret = -EBUSY;
-                break;
-            }
-        }
-
-        drm_connector_list_iter_end(&iter);
-
-        drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
-        mutex_unlock(&drm_dev->mode_config.mutex);
-    }
-
-    if (ret == -EBUSY)
-        DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+    ret = amdgpu_runtime_idle_check_display(dev);
 
     pm_runtime_mark_last_busy(dev);
     pm_runtime_autosuspend(dev);
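Note: the three hunks above factor the display-busy test out of amdgpu_pmops_runtime_idle() into amdgpu_runtime_idle_check_display(), and call it from both the runtime-suspend and runtime-idle paths. The sketch below is a minimal, self-contained model of the contract both callbacks rely on — a check that reports -EBUSY keeps the device powered — with hypothetical names (fake_dev, pm_try_autosuspend); it is not driver code.

```c
/* Minimal model of the -EBUSY contract: a runtime-idle check that
 * reports busy keeps the device from powering down.
 * Standalone sketch with hypothetical names; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct fake_dev {
    bool display_connected; /* stands in for connector_status_connected */
    bool crtc_active;       /* stands in for crtc->state->active */
    bool powered;
};

/* Mirrors the shape of amdgpu_runtime_idle_check_display(): -EBUSY
 * if suspending could disturb a display, 0 otherwise. */
static int runtime_idle_check_display(const struct fake_dev *d)
{
    if (d->display_connected || d->crtc_active)
        return -16; /* -EBUSY */
    return 0;
}

/* Stands in for the PM core: only power down when the check passes. */
static void pm_try_autosuspend(struct fake_dev *d)
{
    if (runtime_idle_check_display(d) == 0)
        d->powered = false;
}

int main(void)
{
    struct fake_dev d = { .display_connected = true, .powered = true };

    pm_try_autosuspend(&d);
    printf("with display: powered=%d\n", d.powered); /* stays 1 */

    d.display_connected = false;
    pm_try_autosuspend(&d);
    printf("no display:   powered=%d\n", d.powered); /* now 0 */
    return 0;
}
```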
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -1151,6 +1151,16 @@ static int gmc_v10_0_set_clockgating_state(void *handle,
     int r;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+    /*
+     * The issue mmhub can't disconnect from DF with MMHUB clock gating being disabled
+     * is a new problem observed at DF 3.0.3, however with the same suspend sequence not
+     * seen any issue on the DF 3.0.2 series platform.
+     */
+    if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) {
+        dev_dbg(adev->dev, "keep mmhub clock gating being enabled for s0ix\n");
+        return 0;
+    }
+
     r = adev->mmhub.funcs->set_clockgating(adev, state);
     if (r)
         return r;
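Note: the version guard works because amdgpu encodes each IP block's version as one packed integer, so `>` compares (major, minor, revision) in one shot. A sketch of the comparison, assuming the usual (major << 16 | minor << 8 | rev) layout of the IP_VERSION macro — an assumption about the macro's internals, shown here only as a standalone illustration:

```c
/* Sketch of the packed-version comparison behind the DF > 3.0.2
 * check. Assumes a (major << 16 | minor << 8 | rev) layout for
 * IP_VERSION; standalone illustration, not driver code. */
#include <assert.h>
#include <stdint.h>

#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

int main(void)
{
    uint32_t df_3_0_2 = IP_VERSION(3, 0, 2);
    uint32_t df_3_0_3 = IP_VERSION(3, 0, 3);

    /* DF 3.0.3 takes the s0ix early return; DF 3.0.2 does not. */
    assert(df_3_0_3 > IP_VERSION(3, 0, 2));
    assert(!(df_3_0_2 > IP_VERSION(3, 0, 2)));
    return 0;
}
```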
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -130,19 +130,33 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
 }
 
 static void increment_queue_count(struct device_queue_manager *dqm,
-            enum kfd_queue_type type)
+                  struct qcm_process_device *qpd,
+                  struct queue *q)
 {
     dqm->active_queue_count++;
-    if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+    if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+        q->properties.type == KFD_QUEUE_TYPE_DIQ)
         dqm->active_cp_queue_count++;
+
+    if (q->properties.is_gws) {
+        dqm->gws_queue_count++;
+        qpd->mapped_gws_queue = true;
+    }
 }
 
 static void decrement_queue_count(struct device_queue_manager *dqm,
-            enum kfd_queue_type type)
+                  struct qcm_process_device *qpd,
+                  struct queue *q)
 {
     dqm->active_queue_count--;
-    if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+    if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+        q->properties.type == KFD_QUEUE_TYPE_DIQ)
         dqm->active_cp_queue_count--;
+
+    if (q->properties.is_gws) {
+        dqm->gws_queue_count--;
+        qpd->mapped_gws_queue = false;
+    }
 }
 
 /*
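Note: the point of widening the helpers' signatures is that every place a queue's active state flips now updates gws_queue_count and qpd->mapped_gws_queue in the same step, instead of each caller remembering the GWS side by hand. The toy model below shows the invariant the refactor enforces; the struct and field names only mirror the dqm/qpd fields used here, everything else is hypothetical:

```c
/* Toy model of the bookkeeping hazard the refactor removes: when
 * callers must update two counters by hand, some path eventually
 * forgets the second one. Hypothetical stand-in types. */
#include <assert.h>
#include <stdbool.h>

struct dqm   { int active_queue_count, gws_queue_count; };
struct qpd   { bool mapped_gws_queue; };
struct queue { bool is_active, is_gws; };

/* Centralized helper, as in the patched decrement_queue_count(). */
static void decrement_queue_count(struct dqm *dqm, struct qpd *qpd,
                                  struct queue *q)
{
    dqm->active_queue_count--;
    if (q->is_gws) {            /* GWS side can no longer be forgotten */
        dqm->gws_queue_count--;
        qpd->mapped_gws_queue = false;
    }
}

int main(void)
{
    struct dqm dqm = { .active_queue_count = 1, .gws_queue_count = 1 };
    struct qpd qpd = { .mapped_gws_queue = true };
    struct queue q = { .is_active = true, .is_gws = true };

    /* e.g. an evict path: one call keeps both counters consistent */
    decrement_queue_count(&dqm, &qpd, &q);
    assert(dqm.active_queue_count == 0 && dqm.gws_queue_count == 0);
    assert(!qpd.mapped_gws_queue);
    return 0;
}
```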
@@ -412,7 +426,7 @@ add_queue_to_list:
     list_add(&q->list, &qpd->queues_list);
     qpd->queue_count++;
     if (q->properties.is_active)
-        increment_queue_count(dqm, q->properties.type);
+        increment_queue_count(dqm, qpd, q);
 
     /*
      * Unconditionally increment this counter, regardless of the queue's
@@ -601,13 +615,8 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
         deallocate_vmid(dqm, qpd, q);
     }
     qpd->queue_count--;
-    if (q->properties.is_active) {
-        decrement_queue_count(dqm, q->properties.type);
-        if (q->properties.is_gws) {
-            dqm->gws_queue_count--;
-            qpd->mapped_gws_queue = false;
-        }
-    }
+    if (q->properties.is_active)
+        decrement_queue_count(dqm, qpd, q);
 
     return retval;
 }
@@ -700,12 +709,11 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
      * dqm->active_queue_count to determine whether a new runlist must be
      * uploaded.
      */
-    if (q->properties.is_active && !prev_active)
-        increment_queue_count(dqm, q->properties.type);
-    else if (!q->properties.is_active && prev_active)
-        decrement_queue_count(dqm, q->properties.type);
-
-    if (q->gws && !q->properties.is_gws) {
+    if (q->properties.is_active && !prev_active) {
+        increment_queue_count(dqm, &pdd->qpd, q);
+    } else if (!q->properties.is_active && prev_active) {
+        decrement_queue_count(dqm, &pdd->qpd, q);
+    } else if (q->gws && !q->properties.is_gws) {
         if (q->properties.is_active) {
             dqm->gws_queue_count++;
             pdd->qpd.mapped_gws_queue = true;
@@ -767,11 +775,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
         mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
                 q->properties.type)];
         q->properties.is_active = false;
-        decrement_queue_count(dqm, q->properties.type);
-        if (q->properties.is_gws) {
-            dqm->gws_queue_count--;
-            qpd->mapped_gws_queue = false;
-        }
+        decrement_queue_count(dqm, qpd, q);
 
         if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
             continue;
@@ -817,7 +821,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
             continue;
 
         q->properties.is_active = false;
-        decrement_queue_count(dqm, q->properties.type);
+        decrement_queue_count(dqm, qpd, q);
     }
     pdd->last_evict_timestamp = get_jiffies_64();
     retval = execute_queues_cpsch(dqm,
@@ -888,11 +892,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
         mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
                 q->properties.type)];
         q->properties.is_active = true;
-        increment_queue_count(dqm, q->properties.type);
-        if (q->properties.is_gws) {
-            dqm->gws_queue_count++;
-            qpd->mapped_gws_queue = true;
-        }
+        increment_queue_count(dqm, qpd, q);
 
         if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
             continue;
@@ -950,7 +950,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
             continue;
 
         q->properties.is_active = true;
-        increment_queue_count(dqm, q->properties.type);
+        increment_queue_count(dqm, &pdd->qpd, q);
     }
     retval = execute_queues_cpsch(dqm,
                 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1378,7 +1378,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
             dqm->total_queue_count);
 
     list_add(&kq->list, &qpd->priv_queue_list);
-    increment_queue_count(dqm, kq->queue->properties.type);
+    increment_queue_count(dqm, qpd, kq->queue);
     qpd->is_debug = true;
     execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
     dqm_unlock(dqm);
@@ -1392,7 +1392,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
     dqm_lock(dqm);
     list_del(&kq->list);
-    decrement_queue_count(dqm, kq->queue->properties.type);
+    decrement_queue_count(dqm, qpd, kq->queue);
     qpd->is_debug = false;
     execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
     /*
@@ -1467,7 +1467,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
     qpd->queue_count++;
 
     if (q->properties.is_active) {
-        increment_queue_count(dqm, q->properties.type);
+        increment_queue_count(dqm, qpd, q);
 
         execute_queues_cpsch(dqm,
                 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1683,15 +1683,11 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
     list_del(&q->list);
     qpd->queue_count--;
     if (q->properties.is_active) {
-        decrement_queue_count(dqm, q->properties.type);
+        decrement_queue_count(dqm, qpd, q);
         retval = execute_queues_cpsch(dqm,
                 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
         if (retval == -ETIME)
             qpd->reset_wavefronts = true;
-        if (q->properties.is_gws) {
-            dqm->gws_queue_count--;
-            qpd->mapped_gws_queue = false;
-        }
     }
 
     /*
@@ -1932,7 +1928,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
     /* Clean all kernel queues */
     list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
         list_del(&kq->list);
-        decrement_queue_count(dqm, kq->queue->properties.type);
+        decrement_queue_count(dqm, qpd, kq->queue);
         qpd->is_debug = false;
         dqm->total_queue_count--;
         filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1945,13 +1941,8 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
         else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
             deallocate_sdma_queue(dqm, q);
 
-        if (q->properties.is_active) {
-            decrement_queue_count(dqm, q->properties.type);
-            if (q->properties.is_gws) {
-                dqm->gws_queue_count--;
-                qpd->mapped_gws_queue = false;
-            }
-        }
+        if (q->properties.is_active)
+            decrement_queue_count(dqm, qpd, q);
 
         dqm->total_queue_count--;
     }
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1103,7 +1103,7 @@ struct kfd_criu_queue_priv_data {
     uint32_t priority;
     uint32_t q_percent;
     uint32_t doorbell_id;
-    uint32_t is_gws;
+    uint32_t gws;
     uint32_t sdma_id;
     uint32_t eop_ring_buffer_size;
     uint32_t ctx_save_restore_area_size;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -636,6 +636,8 @@ static int criu_checkpoint_queue(struct kfd_process_device *pdd,
     q_data->ctx_save_restore_area_size =
         q->properties.ctx_save_restore_area_size;
 
+    q_data->gws = !!q->gws;
+
     ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
     if (ret) {
         pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
@@ -743,7 +745,6 @@ static void set_queue_properties_from_criu(struct queue_properties *qp,
                 struct kfd_criu_queue_priv_data *q_data)
 {
     qp->is_interop = false;
-    qp->is_gws = q_data->is_gws;
     qp->queue_percent = q_data->q_percent;
     qp->priority = q_data->priority;
     qp->queue_address = q_data->q_address;
@@ -826,12 +827,15 @@ int kfd_criu_restore_queue(struct kfd_process *p,
                 NULL);
     if (ret) {
         pr_err("Failed to create new queue err:%d\n", ret);
         ret = -EINVAL;
         goto exit;
     }
 
+    if (q_data->gws)
+        ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws);
+
 exit:
     if (ret)
-        pr_err("Failed to create queue (%d)\n", ret);
+        pr_err("Failed to restore queue (%d)\n", ret);
     else
         pr_debug("Queue id %d was restored successfully\n", queue_id);
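Note: taken together, the three CRIU hunks change what the checkpoint blob records: instead of copying the raw is_gws property into the restored queue, the checkpoint stores whether GWS was attached (!!q->gws) and the restore path re-acquires it through pqm_set_gws(). A sketch of that round-trip, with hypothetical stand-in types and a placeholder for the pqm_set_gws() call:

```c
/* Sketch of the GWS checkpoint/restore round-trip: record *whether*
 * GWS was attached, then re-attach on restore rather than copying
 * the raw property. Hypothetical stand-in types; not kernel code. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct queue_state { void *gws; };    /* live queue */
struct priv_data   { uint32_t gws; }; /* checkpoint blob */

static int fake_gws_handle;           /* stands in for the device GWS */

static void checkpoint(const struct queue_state *q, struct priv_data *d)
{
    d->gws = (q->gws != NULL);        /* like q_data->gws = !!q->gws */
}

/* Mirrors the restore shape: re-acquire instead of blind copy. */
static void restore(const struct priv_data *d, struct queue_state *q)
{
    if (d->gws)
        q->gws = &fake_gws_handle;    /* stands in for pqm_set_gws() */
}

int main(void)
{
    struct queue_state before = { .gws = &fake_gws_handle }, after = { 0 };
    struct priv_data blob = { 0 };

    checkpoint(&before, &blob);
    restore(&blob, &after);
    assert(after.gws != NULL);        /* GWS attachment survived */
    return 0;
}
```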
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -997,6 +997,7 @@ static struct clock_source *dcn21_clock_source_create(
         return &clk_src->base;
     }
 
+    kfree(clk_src);
     BREAK_TO_DEBUGGER();
     return NULL;
 }
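Note: this one-line DCN fix is the classic allocate/construct/fail leak: when construction fails after the allocation succeeds, the error path must free before returning NULL. A generic userspace sketch of the same pattern, with hypothetical names:

```c
/* Generic shape of the dcn21_clock_source_create() leak fix: free
 * the partially constructed object on the failure path before
 * returning NULL. Hypothetical create/construct functions. */
#include <stdbool.h>
#include <stdlib.h>

struct clock_source_obj { int base; };

static bool obj_construct(struct clock_source_obj *obj)
{
    (void)obj;
    return false; /* pretend hardware init failed */
}

static struct clock_source_obj *clock_source_create(void)
{
    struct clock_source_obj *obj = calloc(1, sizeof(*obj));

    if (!obj)
        return NULL;

    if (obj_construct(obj))
        return obj;

    free(obj); /* the added kfree(clk_src): no leak on error */
    return NULL;
}

int main(void)
{
    struct clock_source_obj *cs = clock_source_create();
    free(cs);  /* NULL-safe; create failed without leaking */
    return 0;
}
```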
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -427,6 +427,7 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
 {
     const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+    int i;
 
     if (!adev->pm.dpm_enabled)
         return;
@@ -434,6 +435,15 @@ void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
     if (!pp_funcs->pm_compute_clocks)
         return;
 
+    if (adev->mode_info.num_crtc)
+        amdgpu_display_bandwidth_update(adev);
+
+    for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+        struct amdgpu_ring *ring = adev->rings[i];
+        if (ring && ring->sched.ready)
+            amdgpu_fence_wait_empty(ring);
+    }
+
     mutex_lock(&adev->pm.mutex);
     pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
     mutex_unlock(&adev->pm.mutex);
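Note: these two hunks hoist the display-bandwidth update and the ring drain out of the backend compute-clocks implementations (the same code is deleted from legacy_dpm.c and amd_powerplay.c below) into the wrapper, so the slow fence waits run before adev->pm.mutex is taken rather than under it. A compressed userspace sketch of that ordering, with hypothetical function names:

```c
/* Shape of the hoisting: slow waits (the fence drain) run before
 * the power-management mutex is taken, never under it. Pthreads
 * sketch with hypothetical wait/compute functions. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pm_mutex = PTHREAD_MUTEX_INITIALIZER;

static void wait_rings_idle(void) { /* like amdgpu_fence_wait_empty() */ }
static void backend_compute(void) { puts("compute clocks"); }

static void dpm_compute_clocks(void)
{
    wait_rings_idle();             /* moved out of the backends */

    pthread_mutex_lock(&pm_mutex); /* held only for the fast part */
    backend_compute();
    pthread_mutex_unlock(&pm_mutex);
}

int main(void)
{
    dpm_compute_clocks();
    return 0;
}
```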
@@ -443,6 +453,20 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 {
     int ret = 0;
 
+    if (adev->family == AMDGPU_FAMILY_SI) {
+        mutex_lock(&adev->pm.mutex);
+        if (enable) {
+            adev->pm.dpm.uvd_active = true;
+            adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
+        } else {
+            adev->pm.dpm.uvd_active = false;
+        }
+        mutex_unlock(&adev->pm.mutex);
+
+        amdgpu_dpm_compute_clocks(adev);
+        return;
+    }
+
     ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
     if (ret)
         DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
@@ -453,6 +477,21 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
     int ret = 0;
 
+    if (adev->family == AMDGPU_FAMILY_SI) {
+        mutex_lock(&adev->pm.mutex);
+        if (enable) {
+            adev->pm.dpm.vce_active = true;
+            /* XXX select vce level based on ring/task */
+            adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
+        } else {
+            adev->pm.dpm.vce_active = false;
+        }
+        mutex_unlock(&adev->pm.mutex);
+
+        amdgpu_dpm_compute_clocks(adev);
+        return;
+    }
+
     ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
     if (ret)
         DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -1028,16 +1028,6 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 void amdgpu_legacy_dpm_compute_clocks(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-    int i = 0;
-
-    if (adev->mode_info.num_crtc)
-        amdgpu_display_bandwidth_update(adev);
-
-    for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-        struct amdgpu_ring *ring = adev->rings[i];
-        if (ring && ring->sched.ready)
-            amdgpu_fence_wait_empty(ring);
-    }
 
     amdgpu_dpm_get_active_displays(adev);
 
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -3892,40 +3892,6 @@ static int si_set_boot_state(struct amdgpu_device *adev)
 }
 #endif
 
-static int si_set_powergating_by_smu(void *handle,
-                     uint32_t block_type,
-                     bool gate)
-{
-    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-    switch (block_type) {
-    case AMD_IP_BLOCK_TYPE_UVD:
-        if (!gate) {
-            adev->pm.dpm.uvd_active = true;
-            adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
-        } else {
-            adev->pm.dpm.uvd_active = false;
-        }
-
-        amdgpu_legacy_dpm_compute_clocks(handle);
-        break;
-    case AMD_IP_BLOCK_TYPE_VCE:
-        if (!gate) {
-            adev->pm.dpm.vce_active = true;
-            /* XXX select vce level based on ring/task */
-            adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
-        } else {
-            adev->pm.dpm.vce_active = false;
-        }
-
-        amdgpu_legacy_dpm_compute_clocks(handle);
-        break;
-    default:
-        break;
-    }
-    return 0;
-}
-
 static int si_set_sw_state(struct amdgpu_device *adev)
 {
     return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
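Note: removing si_set_powergating_by_smu() closes a lock recursion. amdgpu_dpm_set_powergating_by_smu() runs with adev->pm.mutex held, and SI's handler re-entered the DPM core via amdgpu_legacy_dpm_compute_clocks(), which also needs pm.mutex. The new SI branch in amdgpu_dpm_enable_uvd()/amdgpu_dpm_enable_vce() (earlier hunks) instead updates the DPM state under the mutex, drops it, and only then recomputes clocks. A minimal model of the hazard and the fixed ordering, with hypothetical names (a non-recursive mutex re-locked from a path that already holds it blocks forever):

```c
/* Minimal model of the SI DPM deadlock fix: update state under the
 * lock, release it, then call the clock recomputation, which takes
 * the same lock itself. Hypothetical function names. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t pm_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool uvd_active;

static void compute_clocks(void)
{
    pthread_mutex_lock(&pm_mutex);   /* needs the lock itself */
    /* ... recompute dpm state ... */
    pthread_mutex_unlock(&pm_mutex);
}

/* Before the fix, the handler was invoked with pm_mutex already
 * held, so the re-lock inside compute_clocks() would deadlock.
 * This is the "after" shape, matching the new SI branch. */
static void enable_uvd_fixed(bool enable)
{
    pthread_mutex_lock(&pm_mutex);
    uvd_active = enable;             /* state update under the lock */
    pthread_mutex_unlock(&pm_mutex);

    compute_clocks();                /* safe: lock no longer held */
}

int main(void)
{
    enable_uvd_fixed(true);
    printf("uvd_active=%d\n", uvd_active);
    return 0;
}
```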
@@ -8125,7 +8091,6 @@ static const struct amd_pm_funcs si_dpm_funcs = {
     .print_power_state = &si_dpm_print_power_state,
     .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
     .force_performance_level = &si_dpm_force_performance_level,
-    .set_powergating_by_smu = &si_set_powergating_by_smu,
     .vblank_too_short = &si_dpm_vblank_too_short,
     .set_fan_control_mode = &si_dpm_set_fan_control_mode,
     .get_fan_control_mode = &si_dpm_get_fan_control_mode,
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1487,16 +1487,6 @@ static void pp_pm_compute_clocks(void *handle)
 {
     struct pp_hwmgr *hwmgr = handle;
     struct amdgpu_device *adev = hwmgr->adev;
-    int i = 0;
-
-    if (adev->mode_info.num_crtc)
-        amdgpu_display_bandwidth_update(adev);
-
-    for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-        struct amdgpu_ring *ring = adev->rings[i];
-        if (ring && ring->sched.ready)
-            amdgpu_fence_wait_empty(ring);
-    }
 
     if (!amdgpu_device_has_dc_support(adev)) {
         amdgpu_dpm_get_active_displays(adev);