Merge tag 'drm-next-5.3-2019-07-09' of git://people.freedesktop.org/~agd5f/linux into drm-next
drm-next-5.3-2019-07-09:

amdgpu:
- GPU reset for navi10
- Powerplay fixes for navi10
- GFX fixes for navi10
- Prepare for hmm_range_register API change
- XGMI fixes
- clang warning fixes
- Fixes for various kconfig scenarios
- Misc fixes and cleanups

amdkfd:
- Add workaround for soft hangs with oversubscribed runlists
- Remove duplicated pcie atomics request

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190710035017.3407-1-alexander.deucher@amd.com
@@ -54,7 +54,9 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o
amdgpu_vm_sdma.o amdgpu_discovery.o

amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o

# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -775,6 +775,7 @@ struct amdgpu_device {
struct mutex grbm_idx_mutex;
struct dev_pm_domain vga_pm_domain;
bool have_disp_power_ref;
bool have_atomics_support;

/* BIOS */
bool is_atom_fw;
@@ -1216,6 +1217,10 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev );
static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
#endif

void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);

#include "amdgpu_object.h"

/* used by df_v3_6.c and amdgpu_pmu.c */
@@ -668,6 +668,13 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
return false;
}

bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

return adev->have_atomics_support;
}

#ifndef CONFIG_HSA_AMD
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
@@ -135,6 +135,7 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len);
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
@@ -504,7 +504,7 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
goto out;
}

ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, bo->tbo.ttm->pages);
ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
if (ret) {
pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
goto unregister_out;
@@ -813,7 +813,7 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info,

ret = amdgpu_sync_resv(NULL,
sync, pd->tbo.resv,
AMDGPU_FENCE_OWNER_UNDEFINED, false);
AMDGPU_FENCE_OWNER_KFD, false);
if (ret)
return ret;
}
@@ -1729,8 +1729,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
bo = mem->bo;

/* Get updated user pages */
ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
bo->tbo.ttm->pages);
ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
if (ret) {
pr_debug("%s: Failed to get user pages: %d\n",
__func__, ret);
@@ -1740,6 +1739,12 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
}

amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

/* Mark the BO as valid unless it was invalidated
* again concurrently.
*/
if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
return -EAGAIN;
}

return 0;
@@ -633,7 +633,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
return -ENOMEM;
}

r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, e->user_pages);
r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
if (r) {
kvfree(e->user_pages);
e->user_pages = NULL;
@@ -2596,6 +2596,17 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (adev->rio_mem == NULL)
DRM_INFO("PCI I/O BAR is not found.\n");

/* enable PCIE atomic ops */
r = pci_enable_atomic_ops_to_root(adev->pdev,
PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
PCI_EXP_DEVCAP2_ATOMIC_COMP64);
if (r) {
adev->have_atomics_support = false;
DRM_INFO("PCIE atomic ops is not supported\n");
} else {
adev->have_atomics_support = true;
}

amdgpu_device_get_pcie_info(adev);

if (amdgpu_mcbp)
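The hunk above, together with the kfd_device.c change later in this diff, follows a request-once, cache-the-answer pattern: the driver asks the PCIe root port to route atomic ops a single time at init and every later consumer reads the cached flag. A minimal sketch of that pattern follows; pci_enable_atomic_ops_to_root() and the DEVCAP2 masks are real kernel APIs, while the structure and helper names are illustrative only.

/* Hedged sketch: probe PCIe atomic routing once, then serve cached result. */
#include <linux/pci.h>

struct my_dev {
	struct pci_dev *pdev;
	bool have_atomics_support;
};

static void my_dev_probe_atomics(struct my_dev *d)
{
	int r = pci_enable_atomic_ops_to_root(d->pdev,
					      PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
					      PCI_EXP_DEVCAP2_ATOMIC_COMP64);

	/* Cache the answer; later callers consult the flag instead of retrying. */
	d->have_atomics_support = (r == 0);
}

static bool my_dev_have_atomics_support(struct my_dev *d)
{
	return d->have_atomics_support;
}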
@@ -2604,7 +2615,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
adev->enable_mes = true;

if (amdgpu_discovery) {
if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
r = amdgpu_discovery_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_discovery_init failed\n");
@@ -2798,7 +2809,8 @@ fence_driver_init:
return r;
}

r = amdgpu_pmu_init(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
r = amdgpu_pmu_init(adev);
if (r)
dev_err(adev->dev, "amdgpu_pmu_init failed\n");

@@ -2870,9 +2882,10 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_debugfs_regs_cleanup(adev);
device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
amdgpu_ucode_sysfs_fini(adev);
amdgpu_pmu_fini(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
amdgpu_debugfs_preempt_cleanup(adev);
if (amdgpu_discovery)
if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
amdgpu_discovery_fini(adev);
}

@@ -3559,6 +3572,12 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
if (vram_lost)
amdgpu_device_fill_reset_magic(tmp_adev);

/*
* Add this ASIC as tracked as reset was already
* complete successfully.
*/
amdgpu_register_gpu_instance(tmp_adev);

r = amdgpu_device_ip_late_init(tmp_adev);
if (r)
goto out;
@@ -3693,8 +3712,19 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
device_list_handle = &device_list;
}

/*
* Mark these ASICs to be reseted as untracked first
* And add them back after reset completed
*/
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
amdgpu_unregister_gpu_instance(tmp_adev);

/* block all schedulers and reset given job's ring */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
/* disable ras on ALL IPs */
if (amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);

for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -247,7 +247,8 @@ module_param_named(msi, amdgpu_msi, int, 0444);
* By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video)
* jobs is 10000. And there is no timeout enforced on compute jobs.
*/
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: 10000 for non-compute jobs and no timeout for compute jobs), "
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: 10000 for non-compute jobs and infinity timeout for compute jobs."
" 0: keep default value. negative: infinity timeout), "
"format is [Non-Compute] or [GFX,Compute,SDMA,Video]");
module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);

@@ -581,14 +582,27 @@ MODULE_PARM_DESC(async_gfx_ring,
"Asynchronous GFX rings that could be configured with either different priorities (HP3D ring and LP3D ring), or equal priorities (0 = disabled, 1 = enabled (default))");
module_param_named(async_gfx_ring, amdgpu_async_gfx_ring, int, 0444);

/**
* DOC: mcbp (int)
* It is used to enable mid command buffer preemption. (0 = disabled (default), 1 = enabled)
*/
MODULE_PARM_DESC(mcbp,
"Enable Mid-command buffer preemption (0 = disabled (default), 1 = enabled)");
module_param_named(mcbp, amdgpu_mcbp, int, 0444);

/**
* DOC: discovery (int)
* Allow driver to discover hardware IP information from IP Discovery table at the top of VRAM.
*/
MODULE_PARM_DESC(discovery,
"Allow driver to discover hardware IPs from IP Discovery table at the top of VRAM");
module_param_named(discovery, amdgpu_discovery, int, 0444);

/**
* DOC: mes (int)
* Enable Micro Engine Scheduler. This is a new hw scheduling engine for gfx, sdma, and compute.
* (0 = disabled (default), 1 = enabled)
*/
MODULE_PARM_DESC(mes,
"Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
module_param_named(mes, amdgpu_mes, int, 0444);
@@ -1302,7 +1316,8 @@ int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
* By default timeout for non compute jobs is 10000.
* And there is no timeout enforced on compute jobs.
*/
adev->gfx_timeout = adev->sdma_timeout = adev->video_timeout = 10000;
adev->gfx_timeout = msecs_to_jiffies(10000);
adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;

if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) {
@@ -1312,10 +1327,13 @@ int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
if (ret)
return ret;

/* Invalidate 0 and negative values */
if (timeout <= 0) {
if (timeout == 0) {
index++;
continue;
} else if (timeout < 0) {
timeout = MAX_SCHEDULE_TIMEOUT;
} else {
timeout = msecs_to_jiffies(timeout);
}

switch (index++) {
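The new parsing above maps each lockup_timeout value as follows: 0 keeps the built-in default, a negative number selects an infinite timeout, and a positive number is converted from milliseconds to jiffies. A minimal sketch of that mapping, under the assumption of a hypothetical helper name; msecs_to_jiffies() and MAX_SCHEDULE_TIMEOUT are real kernel symbols.

/* Hedged sketch of the 0 / negative / positive timeout mapping. */
#include <linux/jiffies.h>
#include <linux/sched.h>

static long timeout_ms_to_jiffies(long timeout_ms, long default_jiffies)
{
	if (timeout_ms == 0)
		return default_jiffies;		/* keep the per-ring default */
	if (timeout_ms < 0)
		return MAX_SCHEDULE_TIMEOUT;	/* infinity: never time out */
	return msecs_to_jiffies(timeout_ms);	/* explicit value in ms */
}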
@@ -31,7 +31,8 @@ struct amdgpu_gds {
uint32_t gds_size;
uint32_t gws_size;
uint32_t oa_size;
uint32_t gds_compute_max_wave_id;
uint32_t gds_compute_max_wave_id;
uint32_t vgt_gs_max_wave_id;
};

struct amdgpu_gds_reg_offset {
@@ -327,8 +327,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
}

if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
bo->tbo.ttm->pages);
r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
if (r)
goto release_object;
@@ -44,7 +44,7 @@
#include "amdgpu_display.h"
#include "amdgpu_ras.h"

static void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
struct amdgpu_gpu_instance *gpu_instance;
int i;
@@ -105,7 +105,7 @@ done_free:
dev->dev_private = NULL;
}

static void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
struct amdgpu_gpu_instance *gpu_instance;
@@ -45,48 +45,11 @@

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/hmm.h>
#include <linux/interval_tree.h>

#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"

/**
* struct amdgpu_mn
*
* @adev: amdgpu device pointer
* @mm: process address space
* @type: type of MMU notifier
* @work: destruction work item
* @node: hash table node to find structure by adev and mn
* @lock: rw semaphore protecting the notifier nodes
* @objects: interval tree containing amdgpu_mn_nodes
* @mirror: HMM mirror function support
*
* Data for each amdgpu device and process address space.
*/
struct amdgpu_mn {
/* constant after initialisation */
struct amdgpu_device *adev;
struct mm_struct *mm;
enum amdgpu_mn_type type;

/* only used on destruction */
struct work_struct work;

/* protected by adev->mn_lock */
struct hlist_node node;

/* objects protected by lock */
struct rw_semaphore lock;
struct rb_root_cached objects;

/* HMM mirror */
struct hmm_mirror mirror;
};

/**
* struct amdgpu_mn_node
*
@@ -24,17 +24,53 @@
#ifndef __AMDGPU_MN_H__
#define __AMDGPU_MN_H__

/*
* HMM mirror
*/
struct amdgpu_mn;
struct hmm_range;
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/interval_tree.h>

enum amdgpu_mn_type {
AMDGPU_MN_TYPE_GFX,
AMDGPU_MN_TYPE_HSA,
};

/**
* struct amdgpu_mn
*
* @adev: amdgpu device pointer
* @mm: process address space
* @type: type of MMU notifier
* @work: destruction work item
* @node: hash table node to find structure by adev and mn
* @lock: rw semaphore protecting the notifier nodes
* @objects: interval tree containing amdgpu_mn_nodes
* @mirror: HMM mirror function support
*
* Data for each amdgpu device and process address space.
*/
struct amdgpu_mn {
/* constant after initialisation */
struct amdgpu_device *adev;
struct mm_struct *mm;
enum amdgpu_mn_type type;

/* only used on destruction */
struct work_struct work;

/* protected by adev->mn_lock */
struct hlist_node node;

/* objects protected by lock */
struct rw_semaphore lock;
struct rb_root_cached objects;

#ifdef CONFIG_HMM_MIRROR
/* HMM mirror */
struct hmm_mirror mirror;
#endif
};

#if defined(CONFIG_HMM_MIRROR)
void amdgpu_mn_lock(struct amdgpu_mn *mn);
void amdgpu_mn_unlock(struct amdgpu_mn *mn);
@@ -731,8 +731,10 @@ struct amdgpu_ttm_tt {

#define MAX_RETRY_HMM_RANGE_FAULT 16

int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
struct hmm_mirror *mirror = bo->mn ? &bo->mn->mirror : NULL;
struct ttm_tt *ttm = bo->tbo.ttm;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct mm_struct *mm = gtt->usertask->mm;
unsigned long start = gtt->userptr;
@@ -746,6 +748,12 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
if (!mm) /* Happens during process shutdown */
return -ESRCH;

if (unlikely(!mirror)) {
DRM_DEBUG_DRIVER("Failed to get hmm_mirror\n");
r = -EFAULT;
goto out;
}

vma = find_vma(mm, start);
if (unlikely(!vma || start < vma->vm_start)) {
r = -EFAULT;
@@ -102,10 +102,11 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);

#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages);
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
struct page **pages)
{
return -EPERM;
}
@@ -1544,24 +1544,6 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)

gfx_v10_0_init_compute_vmid(adev);

mutex_lock(&adev->grbm_idx_mutex);
/*
* making sure that the following register writes will be broadcasted
* to all the shaders
*/
gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

tmp = REG_SET_FIELD(0, PA_SC_FIFO_SIZE, SC_FRONTEND_PRIM_FIFO_SIZE,
adev->gfx.config.sc_prim_fifo_size_frontend);
tmp = REG_SET_FIELD(tmp, PA_SC_FIFO_SIZE, SC_BACKEND_PRIM_FIFO_SIZE,
adev->gfx.config.sc_prim_fifo_size_backend);
tmp = REG_SET_FIELD(tmp, PA_SC_FIFO_SIZE, SC_HIZ_TILE_FIFO_SIZE,
adev->gfx.config.sc_hiz_tile_fifo_size);
tmp = REG_SET_FIELD(tmp, PA_SC_FIFO_SIZE, SC_EARLYZ_TILE_FIFO_SIZE,
adev->gfx.config.sc_earlyz_tile_fifo_size);
WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE, tmp);

mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
@@ -4215,6 +4197,15 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;

/* Prevent a hw deadlock due to a wave ID mismatch between ME and GDS.
* This resets the wave ID counters. (needed by transform feedback)
* TODO: This might only be needed on a VMID switch when we change
* the GDS OA mapping, not sure.
*/
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
amdgpu_ring_write(ring, mmVGT_GS_MAX_WAVE_ID);
amdgpu_ring_write(ring, ring->adev->gds.vgt_gs_max_wave_id);

if (ib->flags & AMDGPU_IB_FLAG_CE)
header = PACKET3(PACKET3_INDIRECT_BUFFER_CNST, 2);
else
@@ -4252,6 +4243,22 @@ static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

/* Currently, there is a high possibility to get wave ID mismatch
* between ME and GDS, leading to a hw deadlock, because ME generates
* different wave IDs than the GDS expects. This situation happens
* randomly when at least 5 compute pipes use GDS ordered append.
* The wave IDs generated by ME are also wrong after suspend/resume.
* Those are probably bugs somewhere else in the kernel driver.
*
* Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
* GDS to 0 for this ring (me/pipe).
*/
if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
}

amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
@@ -4278,11 +4285,7 @@ static void gfx_v10_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
PACKET3_RELEASE_MEM_GCR_GL2_WB |
PACKET3_RELEASE_MEM_GCR_GL2_INV |
PACKET3_RELEASE_MEM_GCR_GL2_US |
PACKET3_RELEASE_MEM_GCR_GL1_INV |
PACKET3_RELEASE_MEM_GCR_GLV_INV |
PACKET3_RELEASE_MEM_GCR_GLM_INV |
PACKET3_RELEASE_MEM_GCR_GLM_INV | /* must be set with GLM_WB */
PACKET3_RELEASE_MEM_GCR_GLM_WB |
PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
@@ -4948,7 +4951,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
2, /* SWITCH_BUFFER */
.emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */
.emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_gfx */
.emit_ib = gfx_v10_0_ring_emit_ib_gfx,
.emit_fence = gfx_v10_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
@@ -4987,7 +4990,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v10_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_compute */
.emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
.emit_ib = gfx_v10_0_ring_emit_ib_compute,
.emit_fence = gfx_v10_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
@@ -5020,7 +5023,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v10_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_compute */
.emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
.emit_ib = gfx_v10_0_ring_emit_ib_compute,
.emit_fence = gfx_v10_0_ring_emit_fence_kiq,
.test_ring = gfx_v10_0_ring_test_ring,
@@ -5096,10 +5099,10 @@ static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
/* init asic gds info */
switch (adev->asic_type) {
case CHIP_NAVI10:
adev->gds.gds_size = 0x10000;
break;
default:
adev->gds.gds_size = 0x10000;
adev->gds.gds_compute_max_wave_id = 0x4ff;
adev->gds.vgt_gs_max_wave_id = 0x3ff;
break;
}
@@ -3925,11 +3925,10 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)

int list_size;
unsigned int *register_list_format =
kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
kmemdup(adev->gfx.rlc.register_list_format,
adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
if (!register_list_format)
return -ENOMEM;
memcpy(register_list_format, adev->gfx.rlc.register_list_format,
adev->gfx.rlc.reg_list_format_size_bytes);

gfx_v8_0_parse_ind_reg_list(register_list_format,
RLC_FormatDirectRegListLength,
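This hunk, the matching gfx_v9 hunk, and the two DC copy helpers later in the diff all replace a kmalloc() followed by memcpy() with a single kmemdup() call, which allocates and copies in one step while keeping the error handling the same. A minimal sketch of the conversion; the helper name is illustrative, kmemdup() is the real slab API.

/* Hedged sketch: kmalloc() + memcpy() collapsed into kmemdup(). */
#include <linux/slab.h>
#include <linux/types.h>

static u32 *dup_reg_list(const u32 *src, size_t size_bytes)
{
	/* Before: buf = kmalloc(size_bytes, GFP_KERNEL); memcpy(buf, src, size_bytes); */
	return kmemdup(src, size_bytes, GFP_KERNEL);	/* NULL on allocation failure */
}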
@@ -1960,25 +1960,6 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);

gfx_v9_0_init_compute_vmid(adev);

mutex_lock(&adev->grbm_idx_mutex);
/*
* making sure that the following register writes will be broadcasted
* to all the shaders
*/
gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

WREG32_SOC15_RLC(GC, 0, mmPA_SC_FIFO_SIZE,
(adev->gfx.config.sc_prim_fifo_size_frontend <<
PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
(adev->gfx.config.sc_prim_fifo_size_backend <<
PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
(adev->gfx.config.sc_hiz_tile_fifo_size <<
PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
(adev->gfx.config.sc_earlyz_tile_fifo_size <<
PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
mutex_unlock(&adev->grbm_idx_mutex);

}

static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
@@ -2093,11 +2074,10 @@ static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
u32 tmp = 0;

u32 *register_list_format =
kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
kmemdup(adev->gfx.rlc.register_list_format,
adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
if (!register_list_format)
return -ENOMEM;
memcpy(register_list_format, adev->gfx.rlc.register_list_format,
adev->gfx.rlc.reg_list_format_size_bytes);

/* setup unique_indirect_regs array and indirect_start_offsets array */
unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
@@ -245,8 +245,9 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev,
mutex_lock(&adev->mman.gtt_window_lock);

gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB, 0);
if (!adev->mman.buffer_funcs_enabled || !adev->ib_pool_ready ||
adev->asic_type != CHIP_NAVI10) {
if (!adev->mman.buffer_funcs_enabled ||
!adev->ib_pool_ready ||
adev->in_gpu_reset) {
gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB, 0);
mutex_unlock(&adev->mman.gtt_window_lock);
return;
@@ -22,7 +22,7 @@
*/

#ifndef __MES_V10_1_H__
#define __MES_v10_1_H__
#define __MES_V10_1_H__

extern const struct amdgpu_ip_block_version mes_v10_1_ip_block;
@@ -31,6 +31,7 @@
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

@@ -255,6 +256,39 @@ static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
}
#endif

static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
u32 i;
int ret = 0;

amdgpu_atombios_scratch_regs_engine_hung(adev, true);

dev_info(adev->dev, "GPU mode1 reset\n");

/* disable BM */
pci_clear_master(adev->pdev);

pci_save_state(adev->pdev);

ret = psp_gpu_reset(adev);
if (ret)
dev_err(adev->dev, "GPU mode1 reset failed\n");

pci_restore_state(adev->pdev);

/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
u32 memsize = adev->nbio_funcs->get_memsize(adev);

if (memsize != 0xffffffff)
break;
udelay(1);
}

amdgpu_atombios_scratch_regs_engine_hung(adev, false);

return ret;
}

static int nv_asic_reset(struct amdgpu_device *adev)
{
@@ -266,8 +300,15 @@ static int nv_asic_reset(struct amdgpu_device *adev)

amdgpu_atombios_scratch_regs_engine_hung(adev, false);
#endif
int ret = 0;
struct smu_context *smu = &adev->smu;

return 0;
if (smu_baco_is_support(smu))
ret = smu_baco_reset(smu);
else
ret = nv_asic_mode1_reset(adev);

return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
@@ -348,8 +389,12 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
# warning "Enable CONFIG_DRM_AMD_DC for display support on navi."
#endif
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
@@ -534,7 +534,7 @@ psp_v11_0_sram_map(struct amdgpu_device *adev,

case AMDGPU_UCODE_ID_RLC_G:
*sram_offset = 0x2000;
if (adev->asic_type != CHIP_NAVI10) {
if (adev->asic_type < CHIP_NAVI10) {
*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
} else {
@@ -545,7 +545,7 @@ psp_v11_0_sram_map(struct amdgpu_device *adev,

case AMDGPU_UCODE_ID_SDMA0:
*sram_offset = 0x0;
if (adev->asic_type != CHIP_NAVI10) {
if (adev->asic_type < CHIP_NAVI10) {
*sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
*sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
} else {
@@ -487,7 +487,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
{
struct kfd_dev *kfd;
int ret;
const struct kfd_device_info *device_info =
lookup_device_info(pdev->device);

@@ -504,17 +503,15 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
* 32 and 64-bit requests are possible and must be
* supported.
*/
ret = pci_enable_atomic_ops_to_root(pdev,
PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
PCI_EXP_DEVCAP2_ATOMIC_COMP64);
if (device_info->needs_pci_atomics && ret < 0) {
kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd);
if (device_info->needs_pci_atomics &&
!kfd->pci_atomic_requested) {
dev_info(kfd_device,
"skipped device %x:%x, PCI rejects atomics\n",
pdev->vendor, pdev->device);
kfree(kfd);
return NULL;
} else if (!ret)
kfd->pci_atomic_requested = true;
}

kfd->kgd = kgd;
kfd->device_info = device_info;
@@ -134,6 +134,7 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
packet->bitfields4.ib_size = ib_size_in_dwords;
packet->bitfields4.chain = chain ? 1 : 0;
packet->bitfields4.offload_polling = 0;
packet->bitfields4.chained_runlist_idle_disable = chain ? 1 : 0;
packet->bitfields4.valid = 1;
packet->bitfields4.process_cnt = concurrent_proc_cnt;
packet->ordinal2 = lower_32_bits(ib);
@@ -203,11 +203,15 @@ static int pm_create_runlist_ib(struct packet_manager *pm,

pr_debug("Finished map process and queues to runlist\n");

if (is_over_subscription)
if (is_over_subscription) {
if (!pm->is_over_subscription)
pr_warn("Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
*rl_gpu_addr,
alloc_size_bytes / sizeof(uint32_t),
true);
}
pm->is_over_subscription = is_over_subscription;

for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
pr_debug("0x%2X ", rl_buffer[i]);
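The hunk above warns only on the transition into oversubscription: the previous state is remembered in the packet manager, so rebuilding an already-oversubscribed runlist stays quiet. A minimal sketch of that warn-on-rising-edge pattern, with illustrative names.

/* Hedged sketch: emit the warning only when the condition newly becomes true. */
#include <linux/printk.h>

struct runlist_state {
	bool is_over_subscription;	/* remembered across rebuilds */
};

static void note_oversubscription(struct runlist_state *st, bool now_over)
{
	if (now_over && !st->is_over_subscription)
		pr_warn("Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");

	st->is_over_subscription = now_over;
}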
@@ -120,7 +120,7 @@ struct pm4_mes_runlist {
uint32_t ib_size:20;
uint32_t chain:1;
uint32_t offload_polling:1;
uint32_t reserved2:1;
uint32_t chained_runlist_idle_disable:1;
uint32_t valid:1;
uint32_t process_cnt:4;
uint32_t reserved3:4;
@@ -937,6 +937,7 @@ struct packet_manager {
bool allocated;
struct kfd_mem_obj *ib_buffer_obj;
unsigned int ib_size_bytes;
bool is_over_subscription;

const struct packet_manager_funcs *pmf;
};
@@ -1535,10 +1535,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
int i;
unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

if (adev->asic_type == CHIP_VEGA10 ||
adev->asic_type == CHIP_VEGA12 ||
adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_RAVEN)
if (adev->asic_type >= CHIP_VEGA10)
client_id = SOC15_IH_CLIENTID_DCE;

int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
@@ -1190,14 +1190,12 @@ struct dc_state *dc_create_state(struct dc *dc)
struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
int i, j;
struct dc_state *new_ctx = kzalloc(sizeof(struct dc_state),
GFP_KERNEL);
struct dc_state *new_ctx = kmemdup(src_ctx,
sizeof(struct dc_state), GFP_KERNEL);

if (!new_ctx)
return NULL;

memcpy(new_ctx, src_ctx, sizeof(struct dc_state));

for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
@@ -3072,8 +3072,8 @@ uint32_t dc_link_bandwidth_kbps(
* but the difference is minimal and is in a safe direction,
* which all works well around potential ambiguity of DP 1.4a spec.
*/
long long fec_link_bw_kbps = link_bw_kbps * 970LL;
link_bw_kbps = (uint32_t)(fec_link_bw_kbps / 1000LL);
link_bw_kbps = mul_u64_u32_shr(BIT_ULL(32) * 970LL / 1000,
link_bw_kbps, 32);
}
#endif
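The rewrite above, and the dcn20 bounding-box hunk later in this diff, replace a 64-bit multiply-then-divide with mul_u64_u32_shr(): the constant ratio is pre-computed as a 32.32 fixed-point factor, so the scaling becomes a multiply and a shift with no 64-bit division. A minimal sketch of the same trick; mul_u64_u32_shr() and BIT_ULL() are real kernel helpers, the wrapper name is illustrative.

/* Hedged sketch: scale a value by 970/1000 using 32.32 fixed point. */
#include <linux/math64.h>
#include <linux/bits.h>

static u32 scale_kbps_by_970_over_1000(u32 kbps)
{
	/* 970/1000 expressed as a 32.32 fixed-point constant */
	u64 factor = BIT_ULL(32) * 970ULL / 1000ULL;

	/* mul_u64_u32_shr(a, b, 32) evaluates (a * b) >> 32 */
	return (u32)mul_u64_u32_shr(factor, kbps, 32);
}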
@@ -181,12 +181,10 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
{
struct dc_stream_state *new_stream;

new_stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL);
new_stream = kmemdup(stream, sizeof(struct dc_stream_state), GFP_KERNEL);
if (!new_stream)
return NULL;

memcpy(new_stream, stream, sizeof(struct dc_stream_state));

if (new_stream->sink)
dc_sink_retain(new_stream->sink);
@@ -22,6 +22,7 @@
* Authors: AMD
*
*/
#include <linux/delay.h>

#include "dm_services.h"
#include "dm_helpers.h"
@@ -2647,7 +2647,7 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_
calculated_states[i].dram_speed_mts = uclk_states[i] * 16 / 1000;

// FCLK:UCLK ratio is 1.08
min_fclk_required_by_uclk = ((unsigned long long)uclk_states[i]) * 1080 / 1000000;
min_fclk_required_by_uclk = mul_u64_u32_shr(BIT_ULL(32) * 1080 / 1000000, uclk_states[i], 32);

calculated_states[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ?
min_dcfclk : min_fclk_required_by_uclk;
@@ -634,6 +634,11 @@ static int smu_sw_init(void *handle)
bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

mutex_init(&smu->smu_baco.mutex);
smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.platform_support = false;

smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
@@ -1103,11 +1108,20 @@ static int smu_suspend(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);

ret = smu_system_features_control(smu, false);
if (ret)
return ret;

if (adev->in_gpu_reset && baco_feature_is_enabled) {
ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
if (ret) {
pr_warn("set BACO feature enabled failed, return %d\n", ret);
return ret;
}
}

smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

if (adev->asic_type >= CHIP_NAVI10 &&
@@ -327,7 +327,7 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr)
if (ret)
return ret;

ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL);
ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);

return ret;
}
@@ -916,8 +916,10 @@ static int init_thermal_controller(
PHM_PlatformCaps_ThermalController
);

if (0 == powerplay_table->usFanTableOffset)
if (0 == powerplay_table->usFanTableOffset) {
hwmgr->thermal_controller.use_hw_fan_control = 1;
return 0;
}

fan_table = (const PPTable_Generic_SubTable_Header *)
(((unsigned long)powerplay_table) +
@@ -3495,7 +3495,7 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
ixSMU_PM_STATUS_95, 0);

for (i = 0; i < 10; i++) {
mdelay(500);
msleep(500);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
tmp = cgs_read_ind_register(hwmgr->device,
CGS_IND_REG__SMC,
@@ -241,6 +241,7 @@ enum smu_message_type
SMU_MSG_PowerUpJpeg,
SMU_MSG_PowerDownJpeg,
SMU_MSG_BacoAudioD3PME,
SMU_MSG_ArmD3,
SMU_MSG_MAX_COUNT,
};

@@ -489,6 +490,19 @@ struct mclock_latency_table {
struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM];
};

enum smu_baco_state
{
SMU_BACO_STATE_ENTER = 0,
SMU_BACO_STATE_EXIT,
};

struct smu_baco_context
{
struct mutex mutex;
uint32_t state;
bool platform_support;
};

#define WORKLOAD_POLICY_MAX 7
struct smu_context
{
@@ -505,6 +519,7 @@ struct smu_context
struct smu_power_context smu_power;
struct smu_feature smu_feature;
struct amd_pp_display_configuration *display_config;
struct smu_baco_context smu_baco;
void *od_settings;

uint32_t pstate_sclk;
@@ -680,6 +695,11 @@ struct smu_funcs
int (*register_irq_handler)(struct smu_context *smu);
int (*set_azalia_d3_pme)(struct smu_context *smu);
int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks);
bool (*baco_is_support)(struct smu_context *smu);
enum smu_baco_state (*baco_get_state)(struct smu_context *smu);
int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);
int (*baco_reset)(struct smu_context *smu);

};

#define smu_init_microcode(smu) \
@@ -892,6 +912,12 @@ struct smu_funcs
((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0)
#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0)
#define smu_baco_is_support(smu) \
((smu)->funcs->baco_is_support? (smu)->funcs->baco_is_support((smu)) : false)
#define smu_baco_get_state(smu, state) \
((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0)
#define smu_baco_reset(smu) \
((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0)

extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
uint16_t *size, uint8_t *frev, uint8_t *crev,
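The dispatch macros added above follow the pattern used throughout this header: each wrapper checks whether the backend filled in the callback and falls back to a harmless default (false or 0) when it did not, so common code can invoke the wrapper unconditionally. A minimal sketch of that pattern with illustrative names.

/* Hedged sketch: optional-callback dispatch with a safe fallback. */
struct backend_funcs {
	bool (*feature_is_support)(void *ctx);
};

/* Call the hook if the backend provides it, otherwise report "not supported". */
#define backend_feature_is_support(ctx, funcs) \
	((funcs)->feature_is_support ? (funcs)->feature_is_support((ctx)) : false)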
@@ -695,6 +695,7 @@ struct pp_thermal_controller_info {
uint8_t ucType;
uint8_t ucI2cLine;
uint8_t ucI2cAddress;
uint8_t use_hw_fan_control;
struct pp_fan_info fanInfo;
struct pp_advance_fan_control_parameters advanceFanControlParameters;
};
@@ -26,7 +26,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
#define SMU11_DRIVER_IF_VERSION 0x32
#define SMU11_DRIVER_IF_VERSION 0x33

#define PPTABLE_NV10_SMU_VERSION 8
@@ -813,8 +813,8 @@ typedef struct {
uint16_t UclkAverageLpfTau;
uint16_t GfxActivityLpfTau;
uint16_t UclkActivityLpfTau;
uint16_t SocketPowerLpfTau;

uint16_t Padding;
// Padding - ignore
uint32_t MmHubPadding[8]; // SMU internal use
} DriverSmuConfig_t;
@@ -853,7 +853,7 @@ typedef struct {
uint8_t CurrGfxVoltageOffset ;
uint8_t CurrMemVidOffset ;
uint8_t Padding8 ;
uint16_t CurrSocketPower ;
uint16_t AverageSocketPower ;
uint16_t TemperatureEdge ;
uint16_t TemperatureHotspot ;
uint16_t TemperatureMem ;
@@ -105,6 +105,14 @@ struct smu_11_0_power_context {
enum smu_11_0_power_state power_state;
};

enum smu_v11_0_baco_seq {
BACO_SEQ_BACO = 0,
BACO_SEQ_MSR,
BACO_SEQ_BAMACO,
BACO_SEQ_ULPS,
BACO_SEQ_COUNT,
};

void smu_v11_0_set_smu_funcs(struct smu_context *smu);

#endif
@@ -115,6 +115,7 @@ static int navi10_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg),
MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg),
MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME),
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3),
};

static int navi10_clk_map[SMU_CLK_COUNT] = {
@@ -478,6 +479,7 @@ static int navi10_store_powerplay_table(struct smu_context *smu)
{
struct smu_11_0_powerplay_table *powerplay_table = NULL;
struct smu_table_context *table_context = &smu->smu_table;
struct smu_baco_context *smu_baco = &smu->smu_baco;

if (!table_context->power_play_table)
return -EINVAL;
@@ -489,6 +491,12 @@ static int navi10_store_powerplay_table(struct smu_context *smu)

table_context->thermal_controller_type = powerplay_table->thermal_controller_type;

mutex_lock(&smu_baco->mutex);
if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
smu_baco->platform_support = true;
mutex_unlock(&smu_baco->mutex);

return 0;
}

@@ -598,12 +606,14 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value)
{
static SmuMetrics_t metrics = {0};
static SmuMetrics_t metrics;
int ret = 0, clk_id = 0;

if (!value)
return -EINVAL;

memset(&metrics, 0, sizeof(metrics));

ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, (void *)&metrics, false);
if (ret)
return ret;
@@ -863,12 +873,13 @@ static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value)
if (ret)
return ret;

*value = metrics.CurrSocketPower << 8;
*value = metrics.AverageSocketPower << 8;

return 0;
}

static int navi10_get_current_activity_percent(struct smu_context *smu,
enum amd_pp_sensors sensor,
uint32_t *value)
{
int ret = 0;
@@ -884,7 +895,17 @@ static int navi10_get_current_activity_percent(struct smu_context *smu,
if (ret)
return ret;

*value = metrics.AverageGfxActivity;
switch (sensor) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
*value = metrics.AverageGfxActivity;
break;
case AMDGPU_PP_SENSOR_MEM_LOAD:
*value = metrics.AverageUclkActivity;
break;
default:
pr_err("Invalid sensor for retrieving clock activity\n");
return -EINVAL;
}

return 0;
}
@@ -902,12 +923,14 @@ static bool navi10_is_dpm_running(struct smu_context *smu)

static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value)
{
SmuMetrics_t metrics = {0};
SmuMetrics_t metrics;
int ret = 0;

if (!value)
return -EINVAL;

memset(&metrics, 0, sizeof(metrics));

ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS,
(void *)&metrics, false);
if (ret)
@@ -1247,6 +1270,41 @@ static int navi10_set_watermarks_table(struct smu_context *smu,
return 0;
}

static int navi10_thermal_get_temperature(struct smu_context *smu,
enum amd_pp_sensors sensor,
uint32_t *value)
{
SmuMetrics_t metrics;
int ret = 0;

if (!value)
return -EINVAL;

ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, (void *)&metrics, false);
if (ret)
return ret;

switch (sensor) {
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
*value = metrics.TemperatureHotspot *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case AMDGPU_PP_SENSOR_EDGE_TEMP:
*value = metrics.TemperatureEdge *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case AMDGPU_PP_SENSOR_MEM_TEMP:
*value = metrics.TemperatureMem *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
default:
pr_err("Invalid sensor for retrieving temp\n");
return -EINVAL;
}

return 0;
}

static int navi10_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data, uint32_t *size)
@@ -1260,14 +1318,21 @@ static int navi10_read_sensor(struct smu_context *smu,
*(uint32_t *)data = pptable->FanMaximumRpm;
*size = 4;
break;
case AMDGPU_PP_SENSOR_MEM_LOAD:
case AMDGPU_PP_SENSOR_GPU_LOAD:
ret = navi10_get_current_activity_percent(smu, (uint32_t *)data);
ret = navi10_get_current_activity_percent(smu, sensor, (uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_POWER:
ret = navi10_get_gpu_power(smu, (uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
case AMDGPU_PP_SENSOR_EDGE_TEMP:
case AMDGPU_PP_SENSOR_MEM_TEMP:
ret = navi10_thermal_get_temperature(smu, sensor, (uint32_t *)data);
*size = 4;
break;
default:
return -EINVAL;
}
@@ -40,6 +40,7 @@
#include "asic_reg/mp/mp_11_0_offset.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
#include "asic_reg/nbio/nbio_7_4_offset.h"
#include "asic_reg/nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"

@@ -67,9 +68,9 @@ static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
static int smu_v11_0_wait_for_response(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t cur_value, i;
uint32_t cur_value, i, timeout = adev->usec_timeout * 10;

for (i = 0; i < adev->usec_timeout; i++) {
for (i = 0; i < timeout; i++) {
cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
break;
@@ -77,7 +78,7 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
}

/* timeout means wrong logic */
if (i == adev->usec_timeout)
if (i == timeout)
return -ETIME;

return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
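The change above widens the response-wait budget tenfold while keeping the same poll-then-check structure: read the mailbox register, break once the firmware has posted anything, delay one microsecond per iteration, and report -ETIME only when the budget is exhausted. A minimal, generic sketch of that loop; the register names in the diff are real, the read hook below is an assumption for illustration.

/* Hedged sketch of the scaled polling loop around a firmware mailbox. */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int wait_for_fw_response(u32 (*read_status)(void), u32 usec_budget)
{
	u32 i, timeout = usec_budget * 10;	/* 10x the usual usec budget */

	for (i = 0; i < timeout; i++) {
		if (read_status() != 0)
			return 0;	/* firmware posted a response */
		udelay(1);
	}

	return -ETIME;	/* no response within the extended budget */
}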
@@ -1088,7 +1089,7 @@ static int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
uint32_t *value)
{
int ret = 0;
uint32_t freq;
uint32_t freq = 0;

if (clk_id >= SMU_CLK_COUNT || !value)
return -EINVAL;
@@ -1642,6 +1643,92 @@ static int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
return ret;
}

static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
{
return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
}

static bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct smu_baco_context *smu_baco = &smu->smu_baco;
uint32_t val;
bool baco_support;

mutex_lock(&smu_baco->mutex);
baco_support = smu_baco->platform_support;
mutex_unlock(&smu_baco->mutex);

if (!baco_support)
return false;

if (!smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
return false;

val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
if (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
return true;

return false;
}

static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
enum smu_baco_state baco_state = SMU_BACO_STATE_EXIT;

mutex_lock(&smu_baco->mutex);
baco_state = smu_baco->state;
mutex_unlock(&smu_baco->mutex);

return baco_state;
}

static int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{

struct smu_baco_context *smu_baco = &smu->smu_baco;
int ret = 0;

if (smu_v11_0_baco_get_state(smu) == state)
return 0;

mutex_lock(&smu_baco->mutex);

if (state == SMU_BACO_STATE_ENTER)
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, BACO_SEQ_BACO);
else
ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
if (ret)
goto out;

smu_baco->state = state;
out:
mutex_unlock(&smu_baco->mutex);
return ret;
}

static int smu_v11_0_baco_reset(struct smu_context *smu)
{
int ret = 0;

ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
if (ret)
return ret;

ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
if (ret)
return ret;

msleep(10);

ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
if (ret)
return ret;

return ret;
}

static const struct smu_funcs smu_v11_0_funcs = {
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
@@ -1690,6 +1777,10 @@ static const struct smu_funcs smu_v11_0_funcs = {
.register_irq_handler = smu_v11_0_register_irq_handler,
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
.baco_is_support = smu_v11_0_baco_is_support,
.baco_get_state = smu_v11_0_baco_get_state,
.baco_set_state = smu_v11_0_baco_set_state,
.baco_reset = smu_v11_0_baco_reset,
};

void smu_v11_0_set_smu_funcs(struct smu_context *smu)
@@ -2094,6 +2094,10 @@ static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
return 0;
}

/* use hardware fan control */
if (hwmgr->thermal_controller.use_hw_fan_control)
return 0;

tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
usPWMMin * duty100;
do_div(tmp64, 10000);
@@ -3016,15 +3016,17 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu,
uint32_t *speed)
{
int ret = 0;
uint32_t percent = 0;
uint32_t current_rpm;
uint32_t current_rpm = 0, percent = 0;
PPTable_t *pptable = smu->smu_table.driver_pptable;

ret = smu_get_current_rpm(smu, &current_rpm);
if (ret)
return ret;

percent = current_rpm * 100 / pptable->FanMaximumRpm;
*speed = percent > 100 ? 100 : percent;

return ret;
return 0;
}

static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)