Merge tag 'amd-drm-fixes-6.3-2023-03-02' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-fixes-6.3-2023-03-02:

amdgpu:
- SMU 13 fixes
- Enable TMZ for GC 10.3.6
- Misc display fixes
- Buddy allocator fixes
- GC 11 fixes
- S0ix fix
- INFO IOCTL queries for GC 11
- VCN harvest fixes for SR-IOV
- UMC 8.10 RAS fixes
- Don't restrict bpc to 8
- NBIO 7.5 fix
- Allow freesync on PCon for more devices

amdkfd:
- SDMA fix
- Illegal memory access fix

radeon:
- Display fix for iMac11,2

UAPI:
- Add some additional INFO IOCTL queries for GC 11 fixes
  Mesa MR: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21403

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230302051843.7793-1-alexander.deucher@amd.com
Committed by Dave Airlie on 2023-03-03 08:26:59 +10:00
commit 54ceb92724
39 changed files with 421 additions and 129 deletions


@@ -6,6 +6,7 @@ config DRM_AMDGPU
select FW_LOADER
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select DRM_SCHED


@@ -1073,6 +1073,9 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
(pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
return false;
if (adev->asic_type < CHIP_RAVEN)
return false;
/*
* If ACPI_FADT_LOW_POWER_S0 is not set in the FADT, it is generally
* risky to do any special firmware-related preparations for entering


@@ -107,9 +107,12 @@
* - 3.50.0 - Update AMDGPU_INFO_DEV_INFO IOCTL for minimum engine and memory clock
* Update AMDGPU_INFO_SENSOR IOCTL for PEAK_PSTATE engine and memory clock
* - 3.51.0 - Return the PCIe gen and lanes from the INFO ioctl
* - 3.52.0 - Add AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD, add device_info fields:
* tcp_cache_size, num_sqc_per_wgp, sqc_data_cache_size, sqc_inst_cache_size,
* gl1c_cache_size, gl2c_cache_size, mall_size, enabled_rb_pipes_mask_hi
*/
#define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 51
#define KMS_DRIVER_MINOR 52
#define KMS_DRIVER_PATCHLEVEL 0
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -921,7 +924,7 @@ module_param_named(reset_method, amdgpu_reset_method, int, 0444);
* result in the GPU entering bad status when the number of total
* faulty pages by ECC exceeds the threshold value.
*/
MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = auto(default value), 0 = disable bad page retirement, -2 = ignore bad page threshold)");
MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = ignore threshold (default value), 0 = disable bad page retirement, -2 = driver sets threshold)");
module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
MODULE_PARM_DESC(num_kcq, "number of kernel compute queue user want to setup (8 if set to greater than 8 or less than 0, only affect gfx 8+)");
@@ -2414,8 +2417,10 @@ static int amdgpu_pmops_suspend(struct device *dev)
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else
else if (amdgpu_acpi_is_s3_active(adev))
adev->in_s3 = true;
if (!adev->in_s0ix && !adev->in_s3)
return 0;
return amdgpu_device_suspend(drm_dev, true);
}
@@ -2436,6 +2441,9 @@ static int amdgpu_pmops_resume(struct device *dev)
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
if (!adev->in_s0ix && !adev->in_s3)
return 0;
/* Avoids register access if device is physically gone */
if (!pci_device_is_present(adev->pdev))
adev->no_hw_access = true;


@@ -178,6 +178,8 @@ struct amdgpu_gfx_config {
uint32_t num_sc_per_sh;
uint32_t num_packer_per_sc;
uint32_t pa_sc_tile_steering_override;
/* Whether texture coordinate truncation is conformant. */
bool ta_cntl2_truncate_coord_mode;
uint64_t tcc_disabled_mask;
uint32_t gc_num_tcp_per_sa;
uint32_t gc_num_sdp_interface;


@@ -552,6 +552,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
case IP_VERSION(10, 3, 6):
/* VANGOGH */
case IP_VERSION(10, 3, 1):
/* YELLOW_CARP*/


@@ -808,6 +808,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
if (amdgpu_is_tmz(adev))
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
if (adev->gfx.config.ta_cntl2_truncate_coord_mode)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
vm_size -= AMDGPU_VA_RESERVED_SIZE;
@@ -865,6 +867,15 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
dev_info->tcp_cache_size = adev->gfx.config.gc_tcp_l1_size;
dev_info->num_sqc_per_wgp = adev->gfx.config.gc_num_sqc_per_wgp;
dev_info->sqc_data_cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
dev_info->sqc_inst_cache_size = adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
dev_info->gl1c_cache_size = adev->gfx.config.gc_gl1c_size_per_instance *
adev->gfx.config.gc_gl1c_per_sa;
dev_info->gl2c_cache_size = adev->gfx.config.gc_gl2c_per_gpu;
dev_info->mall_size = adev->gmc.mall_size;
ret = copy_to_user(out, dev_info,
min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
kfree(dev_info);


@@ -139,7 +139,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
places[c].lpfn = visible_pfn;
else
else if (adev->gmc.real_vram_size != adev->gmc.visible_vram_size)
places[c].flags |= TTM_PL_FLAG_TOPDOWN;
if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)


@@ -1683,7 +1683,7 @@ static int psp_hdcp_initialize(struct psp_context *psp)
psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
if (!psp->hdcp_context.context.initialized) {
if (!psp->hdcp_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
if (ret)
return ret;
@@ -1750,7 +1750,7 @@ static int psp_dtm_initialize(struct psp_context *psp)
psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
if (!psp->dtm_context.context.initialized) {
if (!psp->dtm_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
if (ret)
return ret;
@@ -1818,7 +1818,7 @@ static int psp_rap_initialize(struct psp_context *psp)
psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
if (!psp->rap_context.context.initialized) {
if (!psp->rap_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
if (ret)
return ret;


@@ -176,7 +176,7 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
err_data.err_addr_cnt);
amdgpu_ras_save_bad_pages(adev);
amdgpu_ras_save_bad_pages(adev, NULL);
}
dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
@@ -2084,22 +2084,32 @@ out:
/*
* write error record array to eeprom, the function should be
* protected by recovery_lock
* new_cnt: new added UE count, excluding reserved bad pages, can be NULL
*/
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
unsigned long *new_cnt)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
struct amdgpu_ras_eeprom_control *control;
int save_count;
if (!con || !con->eh_data)
if (!con || !con->eh_data) {
if (new_cnt)
*new_cnt = 0;
return 0;
}
mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
save_count = data->count - control->ras_num_recs;
mutex_unlock(&con->recovery_lock);
if (new_cnt)
*new_cnt = save_count / adev->umc.retire_unit;
/* only new entries are saved */
if (save_count > 0) {
if (amdgpu_ras_eeprom_append(control,
@@ -2186,11 +2196,12 @@ static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
/*
* Justification of value bad_page_cnt_threshold in ras structure
*
* Generally, -1 <= amdgpu_bad_page_threshold <= max record length
* in eeprom, and introduce two scenarios accordingly.
* Generally, 0 <= amdgpu_bad_page_threshold <= max record length
* in eeprom or amdgpu_bad_page_threshold == -2, introduce two
* scenarios accordingly.
*
* Bad page retirement enablement:
* - If amdgpu_bad_page_threshold = -1,
* - If amdgpu_bad_page_threshold = -2,
* bad_page_cnt_threshold = typical value by formula.
*
* - When the value from user is 0 < amdgpu_bad_page_threshold <
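As a standalone sketch of the policy this comment describes (a hypothetical helper, not the driver's actual function; the clamping of positive user values to the EEPROM capacity is an assumption):

/* Hypothetical sketch of the bad_page_threshold policy above. */
static int pick_bad_page_threshold(int user_val, int typical_val,
				   int max_eeprom_records)
{
	if (user_val == -1)		/* ignore the threshold entirely */
		return max_eeprom_records;
	if (user_val == -2)		/* driver computes a typical value */
		return typical_val;
	if (user_val == 0)		/* bad page retirement disabled */
		return 0;
	/* positive user values are bounded by the EEPROM record capacity */
	return user_val < max_eeprom_records ? user_val : max_eeprom_records;
}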


@@ -547,7 +547,8 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
struct eeprom_table_record *bps, int pages);
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev);
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
unsigned long *new_cnt);
static inline enum ta_ras_block
amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {


@@ -417,7 +417,8 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
if (!__is_ras_eeprom_supported(adev))
if (!__is_ras_eeprom_supported(adev) ||
!amdgpu_bad_page_threshold)
return false;
/* skip check eeprom table for VEGA20 Gaming */
@@ -428,10 +429,18 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
return false;
if (con->eeprom_control.tbl_hdr.header == RAS_TABLE_HDR_BAD) {
dev_warn(adev->dev, "This GPU is in BAD status.");
dev_warn(adev->dev, "Please retire it or set a larger "
"threshold value when reloading driver.\n");
return true;
if (amdgpu_bad_page_threshold == -1) {
dev_warn(adev->dev, "RAS records:%d exceed threshold:%d",
con->eeprom_control.ras_num_recs, con->bad_page_cnt_threshold);
dev_warn(adev->dev,
"But GPU can be operated due to bad_page_threshold = -1.\n");
return false;
} else {
dev_warn(adev->dev, "This GPU is in BAD status.");
dev_warn(adev->dev, "Please retire it or set a larger "
"threshold value when reloading driver.\n");
return true;
}
}
return false;
@@ -1191,8 +1200,8 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
} else {
dev_err(adev->dev, "RAS records:%d exceed threshold:%d",
control->ras_num_recs, ras->bad_page_cnt_threshold);
if (amdgpu_bad_page_threshold == -2) {
dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -2.");
if (amdgpu_bad_page_threshold == -1) {
dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -1.");
res = 0;
} else {
*exceed_err_limit = true;


@@ -68,7 +68,7 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
err_data.err_addr_cnt);
amdgpu_ras_save_bad_pages(adev);
amdgpu_ras_save_bad_pages(adev, NULL);
}
out:
@@ -147,7 +147,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
err_data->err_addr_cnt) {
amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
err_data->err_addr_cnt);
amdgpu_ras_save_bad_pages(adev);
amdgpu_ras_save_bad_pages(adev, &(err_data->ue_count));
amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);


@@ -74,6 +74,8 @@ struct amdgpu_umc {
/* UMC register per channel offset */
uint32_t channel_offs;
/* how many pages are retired in one UE */
uint32_t retire_unit;
/* channel index table of interleaved memory */
const uint32_t *channel_idx_tbl;
struct ras_common_if *ras_if;


@@ -453,7 +453,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
/* Limit maximum size to 2GiB due to SG table limitations */
size = min(remaining_size, 2ULL << 30);
if (size >= (u64)pages_per_block << PAGE_SHIFT)
if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
!(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
min_block_size = (u64)pages_per_block << PAGE_SHIFT;
cur_size = size;
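The added mask test promotes min_block_size only when size is an exact multiple of the default block size; because the block size is a power of two, the modulus reduces to a bitwise AND. A self-contained check (the page shift and pages_per_block values below are assumptions):

/* Standalone illustration of the alignment test above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t page_shift = 12;		/* assumed 4 KiB pages */
	const uint64_t pages_per_block = 512;	/* assumed 2 MiB block */
	const uint64_t block = pages_per_block << page_shift;
	const uint64_t sizes[] = { 2ULL << 20, 3ULL << 20, 4ULL << 20 };

	for (int i = 0; i < 3; i++) {
		/* true only when size is a whole number of blocks */
		int aligned = sizes[i] >= block && !(sizes[i] & (block - 1));
		printf("size=%llu MiB -> large min_block_size: %d\n",
		       (unsigned long long)(sizes[i] >> 20), aligned);
	}
	return 0;	/* prints 1, 0, 1 for 2, 3, 4 MiB */
}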


@@ -1503,44 +1503,70 @@ static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
}
static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev)
{
u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;
gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE);
gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
CC_GC_SA_UNIT_DISABLE,
SA_DISABLE);
gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE);
gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
GC_USER_SA_UNIT_DISABLE,
SA_DISABLE);
sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
adev->gfx.config.max_shader_engines);
return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
}
static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
u32 rb_mask;
data = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
data |= RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
CC_RB_BACKEND_DISABLE,
BACKEND_DISABLE);
gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
GC_USER_RB_BACKEND_DISABLE,
BACKEND_DISABLE);
rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
adev->gfx.config.max_shader_engines);
data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se);
return (~data) & mask;
return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
}
static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
{
int i, j;
u32 data;
u32 active_rbs = 0;
u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
u32 rb_bitmap_width_per_sa;
u32 max_sa;
u32 active_sa_bitmap;
u32 global_active_rb_bitmap;
u32 active_rb_bitmap = 0;
u32 i;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff);
data = gfx_v11_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
/* query sa bitmap from SA_UNIT_DISABLE registers */
active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev);
/* query rb bitmap from RB_BACKEND_DISABLE registers */
global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev);
/* generate active rb bitmap according to active sa bitmap */
max_sa = adev->gfx.config.max_shader_engines *
adev->gfx.config.max_sh_per_se;
rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
for (i = 0; i < max_sa; i++) {
if (active_sa_bitmap & (1 << i))
active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
}
gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
active_rb_bitmap |= global_active_rb_bitmap;
adev->gfx.config.backend_enable_mask = active_rb_bitmap;
adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
#define DEFAULT_SH_MEM_BASES (0x6000)
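The rewritten loop widens each active SA into rb_bitmap_width_per_sa RB bits; the derivation can be exercised in isolation (the mask values below are made-up examples, not real register state):

/* Standalone sketch of the RB-bitmap derivation above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t active_sa_bitmap = 0x5;	/* assume SAs 0 and 2 active */
	uint32_t rb_bitmap_width_per_sa = 2;	/* backends_per_se / sh_per_se */
	uint32_t max_sa = 4;
	uint32_t active_rb_bitmap = 0;

	for (uint32_t i = 0; i < max_sa; i++) {
		if (active_sa_bitmap & (1u << i))
			active_rb_bitmap |= 0x3u << (i * rb_bitmap_width_per_sa);
	}
	printf("active_rb_bitmap = 0x%x\n", active_rb_bitmap);	/* 0x33 */
	return 0;
}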
@@ -1633,6 +1659,11 @@ static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
gfx_v11_0_get_tcc_info(adev);
adev->gfx.config.pa_sc_tile_steering_override = 0;
/* Set whether texture coordinate truncation is conformant. */
tmp = RREG32_SOC15(GC, 0, regTA_CNTL2);
adev->gfx.config.ta_cntl2_truncate_coord_mode =
REG_GET_FIELD(tmp, TA_CNTL2, TRUNCATE_COORD_MODE);
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
mutex_lock(&adev->srbm_mutex);


@@ -692,6 +692,7 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
adev->umc.retire_unit = 1;
adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
adev->umc.ras = &umc_v8_7_ras;
break;


@@ -570,6 +570,7 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.node_inst_num = adev->gmc.num_umc;
adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM;
if (adev->umc.node_inst_num == 4)
adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl_ext0[0][0][0];
else


@@ -1288,6 +1288,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
adev->umc.retire_unit = 1;
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
adev->umc.ras = &umc_v6_1_ras;
break;
@@ -1296,6 +1297,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
adev->umc.retire_unit = 1;
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
adev->umc.ras = &umc_v6_1_ras;
break;
@@ -1305,6 +1307,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
adev->umc.retire_unit = (UMC_V6_7_NA_MAP_PA_NUM * 2);
if (!adev->gmc.xgmi.connected_to_cpu)
adev->umc.ras = &umc_v6_7_ras;
if (1 & adev->smuio.funcs->get_die_id(adev))


@@ -382,6 +382,11 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data);
break;
case IP_VERSION(7, 5, 1):
data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK;
WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data);
fallthrough;
default:
def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL));
data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,


@@ -209,6 +209,45 @@ static int umc_v8_10_swizzle_mode_na_to_pa(struct amdgpu_device *adev,
return 0;
}
static void umc_v8_10_convert_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data, uint64_t err_addr,
uint32_t ch_inst, uint32_t umc_inst,
uint32_t node_inst, uint64_t mc_umc_status)
{
uint64_t na_err_addr_base;
uint64_t na_err_addr, retired_page_addr;
uint32_t channel_index, addr_lsb, col = 0;
int ret = 0;
channel_index =
adev->umc.channel_idx_tbl[node_inst * adev->umc.umc_inst_num *
adev->umc.channel_inst_num +
umc_inst * adev->umc.channel_inst_num +
ch_inst];
/* the lowest lsb bits should be ignored */
addr_lsb = REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrLsb);
err_addr &= ~((0x1ULL << addr_lsb) - 1);
na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
/* loop for all possibilities of [C6 C5] in normal address. */
for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
/* Mapping normal error address to retired soc physical address. */
ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
na_err_addr, &retired_page_addr);
if (ret) {
dev_err(adev->dev, "Failed to map pa from umc na.\n");
break;
}
dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
retired_page_addr);
amdgpu_umc_fill_error_record(err_data, na_err_addr,
retired_page_addr, channel_index, umc_inst);
}
}
static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t umc_reg_offset,
@@ -218,10 +257,7 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
{
uint64_t mc_umc_status_addr;
uint64_t mc_umc_status, err_addr;
uint64_t mc_umc_addrt0, na_err_addr_base;
uint64_t na_err_addr, retired_page_addr;
uint32_t channel_index, addr_lsb, col = 0;
int ret = 0;
uint64_t mc_umc_addrt0;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -236,12 +272,6 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
return;
}
channel_index =
adev->umc.channel_idx_tbl[node_inst * adev->umc.umc_inst_num *
adev->umc.channel_inst_num +
umc_inst * adev->umc.channel_inst_num +
ch_inst];
/* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
@@ -251,27 +281,8 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
/* the lowest lsb bits should be ignored */
addr_lsb = REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrLsb);
err_addr &= ~((0x1ULL << addr_lsb) - 1);
na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
/* loop for all possibilities of [C6 C5] in normal address. */
for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
/* Mapping normal error address to retired soc physical address. */
ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
na_err_addr, &retired_page_addr);
if (ret) {
dev_err(adev->dev, "Failed to map pa from umc na.\n");
break;
}
dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
retired_page_addr);
amdgpu_umc_fill_error_record(err_data, na_err_addr,
retired_page_addr, channel_index, umc_inst);
}
umc_v8_10_convert_error_address(adev, err_data, err_addr,
ch_inst, umc_inst, node_inst, mc_umc_status);
}
/* clear umc status */
@@ -349,6 +360,133 @@ static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev)
return true;
}
static void umc_v8_10_ecc_info_query_correctable_error_count(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst,
unsigned long *error_count)
{
uint64_t mc_umc_status;
uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
eccinfo_table_idx = node_inst * adev->umc.umc_inst_num *
adev->umc.channel_inst_num +
umc_inst * adev->umc.channel_inst_num +
ch_inst;
/* check the MCUMC_STATUS */
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) {
*error_count += 1;
}
}
static void umc_v8_10_ecc_info_query_uncorrectable_error_count(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst,
unsigned long *error_count)
{
uint64_t mc_umc_status;
uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
eccinfo_table_idx = node_inst * adev->umc.umc_inst_num *
adev->umc.channel_inst_num +
umc_inst * adev->umc.channel_inst_num +
ch_inst;
/* check the MCUMC_STATUS */
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) {
*error_count += 1;
}
}
static void umc_v8_10_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
uint32_t node_inst = 0;
uint32_t umc_inst = 0;
uint32_t ch_inst = 0;
/* TODO: driver needs to toggle DF Cstate to ensure
* safe access of UMC registers. Will add the protection
*/
LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
umc_v8_10_ecc_info_query_correctable_error_count(adev,
node_inst, umc_inst, ch_inst,
&(err_data->ce_count));
umc_v8_10_ecc_info_query_uncorrectable_error_count(adev,
node_inst, umc_inst, ch_inst,
&(err_data->ue_count));
}
}
static void umc_v8_10_ecc_info_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t ch_inst,
uint32_t umc_inst,
uint32_t node_inst)
{
uint32_t eccinfo_table_idx;
uint64_t mc_umc_status, err_addr;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
eccinfo_table_idx = node_inst * adev->umc.umc_inst_num *
adev->umc.channel_inst_num +
umc_inst * adev->umc.channel_inst_num +
ch_inst;
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (mc_umc_status == 0)
return;
if (!err_data->err_addr)
return;
/* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1)) {
err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
umc_v8_10_convert_error_address(adev, err_data, err_addr,
ch_inst, umc_inst, node_inst, mc_umc_status);
}
}
static void umc_v8_10_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
uint32_t node_inst = 0;
uint32_t umc_inst = 0;
uint32_t ch_inst = 0;
/* TODO: driver needs to toggle DF Cstate to ensure
* safe access of UMC registers. Will add the protection
* when firmware interface is ready
*/
LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
umc_v8_10_ecc_info_query_error_address(adev,
err_data,
ch_inst,
umc_inst,
node_inst);
}
}
const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
.query_ras_error_count = umc_v8_10_query_ras_error_count,
.query_ras_error_address = umc_v8_10_query_ras_error_address,
@@ -360,4 +498,6 @@ struct amdgpu_umc_ras umc_v8_10_ras = {
},
.err_cnt_init = umc_v8_10_err_cnt_init,
.query_ras_poison_mode = umc_v8_10_query_ras_poison_mode,
.ecc_info_query_ras_error_count = umc_v8_10_ecc_info_query_ras_error_count,
.ecc_info_query_ras_error_address = umc_v8_10_ecc_info_query_ras_error_address,
};


@@ -78,9 +78,17 @@ static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
if (amdgpu_sriov_vf(adev))
if (amdgpu_sriov_vf(adev)) {
adev->vcn.harvest_config = VCN_HARVEST_MMSCH;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
adev->vcn.harvest_config |= 1 << i;
dev_info(adev->dev, "VCN%d is disabled by hypervisor\n", i);
}
}
}
/* re-use enc ring as unified ring */
adev->vcn.num_enc_rings = 1;
@@ -238,16 +246,11 @@ static int vcn_v4_0_hw_init(void *handle)
continue;
ring = &adev->vcn.inst[i].ring_enc[0];
if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
ring->sched.ready = false;
ring->no_scheduler = true;
dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
} else {
ring->wptr = 0;
ring->wptr_old = 0;
vcn_v4_0_unified_ring_set_wptr(ring);
ring->sched.ready = true;
}
ring->wptr = 0;
ring->wptr_old = 0;
vcn_v4_0_unified_ring_set_wptr(ring);
ring->sched.ready = true;
}
} else {
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {


@@ -2373,7 +2373,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
if (init_mqd_managers(dqm))
goto out_free;
if (allocate_hiq_sdma_mqd(dqm)) {
if (!dev->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) {
pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
goto out_free;
}
@@ -2397,7 +2397,8 @@ static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
dqm->ops.uninitialize(dqm);
deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
if (!dqm->dev->shared_resources.enable_mes)
deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
kfree(dqm);
}


@@ -778,16 +778,13 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
struct kfd_event_waiter *event_waiters;
uint32_t i;
event_waiters = kmalloc_array(num_events,
sizeof(struct kfd_event_waiter),
GFP_KERNEL);
event_waiters = kcalloc(num_events, sizeof(struct kfd_event_waiter),
GFP_KERNEL);
if (!event_waiters)
return NULL;
for (i = 0; (event_waiters) && (i < num_events) ; i++) {
for (i = 0; i < num_events; i++)
init_wait(&event_waiters[i].wait);
event_waiters[i].activated = false;
}
return event_waiters;
}


@@ -308,11 +308,16 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct queue_properties *q)
{
struct v11_sdma_mqd *m;
int size;
m = (struct v11_sdma_mqd *) mqd_mem_obj->cpu_ptr;
memset(m, 0, sizeof(struct v11_sdma_mqd));
if (mm->dev->shared_resources.enable_mes)
size = PAGE_SIZE;
else
size = sizeof(struct v11_sdma_mqd);
memset(m, 0, size);
*mqd = m;
if (gart_addr)
*gart_addr = mqd_mem_obj->gpu_addr;
@@ -443,6 +448,14 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
/*
* To allocate SDMA MQDs by generic functions
* when MES is enabled.
*/
if (dev->shared_resources.enable_mes) {
mqd->allocate_mqd = allocate_mqd;
mqd->free_mqd = kfd_free_mqd_cp;
}
pr_debug("%s@%i\n", __func__, __LINE__);
break;
default:


@@ -28,7 +28,6 @@ config DRM_AMD_DC_DCN
config DRM_AMD_DC_HDCP
bool "Enable HDCP support in DC"
depends on DRM_AMD_DC
select DRM_DISPLAY_HDCP_HELPER
help
Choose this option if you want to support HDCP authentication.


@@ -41,6 +41,8 @@
#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"
#include "vid.h"
#include "amdgpu.h"
@@ -2302,6 +2304,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
if (suspend) {
drm_dp_mst_topology_mgr_suspend(mgr);
} else {
/* if extended timeout is supported in hardware,
* default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
* CTS 4.2.1.1 regression introduced by CTS specs requirement update.
*/
try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
if (!dp_is_lttpr_present(aconnector->dc_link))
try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
ret = drm_dp_mst_topology_mgr_resume(mgr, true);
if (ret < 0) {
dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
@@ -4265,6 +4275,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
/* Update the actual used number of crtc */
adev->mode_info.num_crtc = adev->dm.display_indexes_num;
amdgpu_dm_set_irq_funcs(adev);
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
DRM_ERROR("DM: Failed to initialize mode config\n");
@@ -4757,8 +4769,6 @@ static int dm_early_init(void *handle)
break;
}
amdgpu_dm_set_irq_funcs(adev);
if (adev->mode_info.funcs == NULL)
adev->mode_info.funcs = &dm_display_funcs;
@@ -7235,7 +7245,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
/* This defaults to the max in the range, but we want 8bpc for non-edp. */
aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
aconnector->base.state->max_bpc = 16;
aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
if (connector_type == DRM_MODE_CONNECTOR_eDP &&


@@ -1149,6 +1149,8 @@ static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id)
switch (branch_dev_id) {
case DP_BRANCH_DEVICE_ID_0060AD:
case DP_BRANCH_DEVICE_ID_00E04C:
case DP_BRANCH_DEVICE_ID_90CC24:
ret_val = true;
break;
default:


@@ -779,10 +779,8 @@ void dce110_edp_wait_for_hpd_ready(
dal_gpio_destroy_irq(&hpd);
if (false == edp_hpd_high) {
DC_LOG_WARNING(
"%s: wait timed out!\n", __func__);
}
/* ensure that the panel is detected */
ASSERT(edp_hpd_high);
}
void dce110_edp_power_control(


@@ -998,5 +998,8 @@ void dcn30_prepare_bandwidth(struct dc *dc,
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
dcn20_prepare_bandwidth(dc, context);
dc_dmub_srv_p_state_delegate(dc,
context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);
}


@@ -1802,7 +1802,10 @@ static unsigned int CalculateVMAndRowBytes(
}
if (SurfaceTiling == dm_sw_linear) {
*dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
if (PTEBufferSizeInRequests == 0)
*dpte_row_height = 1;
else
*dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
*dpte_row_width_ub = (dml_ceil(((double) SwathWidth - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
} else if (ScanDirection != dm_vert) {
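The new branch guards the log2 term: with PTEBufferSizeInRequests == 0 the old expression evaluated dml_log2 of zero, which diverges. A minimal userspace rendition of the guarded computation (libm stands in for the dml_* helpers; the sample inputs are assumptions):

#include <math.h>
#include <stdio.h>

static unsigned int dpte_row_height(double pte_buf_reqs,
				    double pte_req_width, double pitch)
{
	if (pte_buf_reqs == 0)
		return 1;	/* the guard added by this fix */
	unsigned int h = 1u << (unsigned int)
		floor(log2(pte_buf_reqs * pte_req_width / pitch));
	return h < 128 ? h : 128;
}

int main(void)
{
	printf("%u\n", dpte_row_height(0, 64, 1024));	/* 1, no log2(0) */
	printf("%u\n", dpte_row_height(128, 64, 1024));	/* 8 */
	return 0;
}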


@@ -33,6 +33,7 @@
#define DPVGA_DONGLE_AUX_DEFER_WA_DELAY 40
#define I2C_OVER_AUX_DEFER_WA_DELAY_1MS 1
#define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/
#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
#define EDID_SEGMENT_SIZE 256


@@ -60,8 +60,6 @@
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif
#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
struct dp_lt_fallback_entry {
enum dc_lane_count lane_count;
enum dc_link_rate link_rate;


@@ -1202,10 +1202,17 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}
ret = smu_setup_pptable(smu);
if (ret) {
dev_err(adev->dev, "Failed to setup pptable!\n");
return ret;
/*
* It is assumed the pptable used before runpm is the same as
* the one used afterwards. Thus, we can reuse the stored
* copy and do not need to resetup the pptable again.
*/
if (!adev->in_runpm) {
ret = smu_setup_pptable(smu);
if (ret) {
dev_err(adev->dev, "Failed to setup pptable!\n");
return ret;
}
}
/* smu_dump_pptable(smu); */


@@ -256,7 +256,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
* to be backward compatible.
* 2. New fw usually brings some optimizations. But that's visible
* only on the paired driver.
* Considering above, we just leave user a warning message instead
* Considering above, we just leave user a verbal message instead
* of halt driver loading.
*/
if (if_version != smu->smc_driver_if_version) {
@@ -264,7 +264,7 @@
"smu fw program = %d, version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_program, smu_version, smu_major, smu_minor, smu_debug);
dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
dev_info(smu->adev->dev, "SMU driver if version not matched\n");
}
return ret;


@@ -93,7 +93,7 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
* to be backward compatible.
* 2. New fw usually brings some optimizations. But that's visible
* only on the paired driver.
* Considering above, we just leave user a warning message instead
* Considering above, we just leave user a verbal message instead
* of halt driver loading.
*/
if (if_version != smu->smc_driver_if_version) {
@@ -101,7 +101,7 @@
"smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_program, smu_version, smu_major, smu_minor, smu_debug);
dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
dev_info(smu->adev->dev, "SMU driver if version not matched\n");
}
return ret;


@@ -311,7 +311,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
* to be backward compatible.
* 2. New fw usually brings some optimizations. But that's visible
* only on the paired driver.
* Considering above, we just leave user a warning message instead
* Considering above, we just leave user a verbal message instead
* of halt driver loading.
*/
if (if_version != smu->smc_driver_if_version) {
@@ -319,7 +319,7 @@
"smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_program, smu_version, smu_major, smu_minor, smu_debug);
dev_warn(adev->dev, "SMU driver if version not matched\n");
dev_info(adev->dev, "SMU driver if version not matched\n");
}
return ret;
@@ -2229,10 +2229,23 @@ int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
enum smu_baco_seq baco_seq)
{
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_ArmD3,
baco_seq,
NULL);
struct smu_baco_context *smu_baco = &smu->smu_baco;
int ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_ArmD3,
baco_seq,
NULL);
if (ret)
return ret;
if (baco_seq == BACO_SEQ_BAMACO ||
baco_seq == BACO_SEQ_BACO)
smu_baco->state = SMU_BACO_STATE_ENTER;
else
smu_baco->state = SMU_BACO_STATE_EXIT;
return 0;
}
bool smu_v13_0_baco_is_support(struct smu_context *smu)


@@ -147,6 +147,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0),
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {


@@ -2123,11 +2123,12 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
/*
* On DCE32 any encoder can drive any block so usually just use crtc id,
* but Apple thinks different at least on iMac10,1, so there use linkb,
* but Apple thinks different at least on iMac10,1 and iMac11,2, so there use linkb,
* otherwise the internal eDP panel will stay dark.
*/
if (ASIC_IS_DCE32(rdev)) {
if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1") ||
dmi_match(DMI_PRODUCT_NAME, "iMac11,2"))
enc_idx = (dig->linkb) ? 1 : 0;
else
enc_idx = radeon_crtc->crtc_id;


@@ -715,6 +715,7 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_IDS_FLAGS_FUSION 0x1
#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
#define AMDGPU_IDS_FLAGS_TMZ 0x4
#define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x8
/* indicate if acceleration can be working */
#define AMDGPU_INFO_ACCEL_WORKING 0x00
@@ -1115,6 +1116,16 @@ struct drm_amdgpu_info_device {
__u64 tcc_disabled_mask;
__u64 min_engine_clock;
__u64 min_memory_clock;
/* The following fields are only set on gfx11+, older chips set 0. */
__u32 tcp_cache_size; /* AKA GL0, VMEM cache */
__u32 num_sqc_per_wgp;
__u32 sqc_data_cache_size; /* AKA SMEM cache */
__u32 sqc_inst_cache_size;
__u32 gl1c_cache_size;
__u32 gl2c_cache_size;
__u64 mall_size; /* AKA infinity cache */
/* high 32 bits of the rb pipes mask */
__u32 enabled_rb_pipes_mask_hi;
};
struct drm_amdgpu_info_hw_ip {
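
For userspace, the new fields arrive through the existing DEV_INFO query once the driver reports minor version 52. A minimal sketch (the libdrm header path and card node are assumptions; zero-initializing the buffer leaves the new fields 0 on older kernels, which copy only their smaller struct):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <libdrm/amdgpu_drm.h>	/* header path may differ per install */

int main(void)
{
	struct drm_amdgpu_info_device dev_info;
	struct drm_amdgpu_info request;
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed node */

	if (fd < 0)
		return 1;
	memset(&dev_info, 0, sizeof(dev_info));
	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)&dev_info;
	request.return_size = sizeof(dev_info);
	request.query = AMDGPU_INFO_DEV_INFO;
	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request) == 0)
		printf("tcp=%u sqc_data=%u gl1c=%u gl2c=%u mall=%llu\n",
		       dev_info.tcp_cache_size, dev_info.sqc_data_cache_size,
		       dev_info.gl1c_cache_size, dev_info.gl2c_cache_size,
		       (unsigned long long)dev_info.mall_size);
	close(fd);
	return 0;
}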