Merge tag 'amd-drm-fixes-6.7-2023-11-17' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.7-2023-11-17:

amdgpu:
- DMCUB fixes
- SR-IOV fix
- GMC9 fix
- Documentation fix
- DSC MST fix
- CS chunk parsing fix
- SMU13.0.6 fixes
- 8K tiled display fix
- Fix potential NULL pointer dereferences
- Cursor lag fix
- Backlight fix
- DCN s0ix fix
- XGMI fix
- DCN encoder disable logic fix
- AGP aperture fixes

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231117063441.4883-1-alexander.deucher@amd.com
commit 86d8f905f2
Daniel Vetter, 2023-11-17 11:46:06 +01:00
26 changed files with 160 additions and 84 deletions

View File

@@ -248,6 +248,7 @@ extern int amdgpu_umsch_mm;
extern int amdgpu_seamless;
extern int amdgpu_user_partt_mode;
+extern int amdgpu_agp;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)

View File

@@ -207,7 +207,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
}
for (i = 0; i < p->nchunks; i++) {
-struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
+struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
struct drm_amdgpu_cs_chunk user_chunk;
uint32_t __user *cdata;
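
The fix drops one level of indirection: chunk_array[i] already holds the user-space address of a struct drm_amdgpu_cs_chunk, so the local must be a single pointer for the later copy_from_user() to read the right object. A self-contained userspace sketch of the same pattern (names are illustrative; memcpy() stands in for copy_from_user()):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct chunk { unsigned int chunk_id; unsigned long long data; };

    int main(void)
    {
        struct chunk c = { .chunk_id = 3, .data = 0x1234 };
        /* userspace hands over chunk addresses packed into an array of u64 */
        uint64_t chunk_array[1] = { (uint64_t)(uintptr_t)&c };

        /* one '*', as in the fixed line: the array element is the chunk address */
        struct chunk *chunk_ptr = (struct chunk *)(uintptr_t)chunk_array[0];
        struct chunk user_chunk;

        memcpy(&user_chunk, chunk_ptr, sizeof(user_chunk));
        printf("chunk_id=%u\n", user_chunk.chunk_id);
        return 0;
    }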

View File

@@ -207,6 +207,7 @@ int amdgpu_user_partt_mode = AMDGPU_AUTO_COMPUTE_PARTITION_MODE;
int amdgpu_umsch_mm;
int amdgpu_seamless = -1; /* auto */
uint amdgpu_debug_mask;
+int amdgpu_agp = -1; /* auto */
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@@ -961,6 +962,15 @@ module_param_named(seamless, amdgpu_seamless, int, 0444);
MODULE_PARM_DESC(debug_mask, "debug options for amdgpu, disabled by default");
module_param_named(debug_mask, amdgpu_debug_mask, uint, 0444);
+
+/**
+ * DOC: agp (int)
+ * Enable the AGP aperture. This provides an aperture in the GPU's internal
+ * address space for direct access to system memory. Note that these accesses
+ * are non-snooped, so they are only used for access to uncached memory.
+ */
+MODULE_PARM_DESC(agp, "AGP (-1 = auto (default), 0 = disable, 1 = enable)");
+module_param_named(agp, amdgpu_agp, int, 0444);

/* These devices are not supported by amdgpu.
 * They are supported by the mach64, r128, radeon drivers
 */
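
The parameter follows amdgpu's usual tri-state convention and, per the gmc_v9/v10/v11 hunks further down, only an explicit agp=1 programs an AGP window; it is set as amdgpu.agp=1 on the kernel command line or agp=1 when loading the module. A minimal userspace model of the gate (names illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    static int amdgpu_agp = -1; /* -1 = auto (default), 0 = disable, 1 = enable */

    /* mirrors the gating added in the GMC hunks below */
    static bool program_agp_window(bool is_sriov_vf)
    {
        return !is_sriov_vf && amdgpu_agp == 1;
    }

    int main(void)
    {
        printf("auto:  %d\n", program_agp_window(false)); /* 0: window stays off */
        amdgpu_agp = 1;
        printf("agp=1: %d\n", program_agp_window(false)); /* 1: window programmed */
        return 0;
    }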

View File

@@ -1473,6 +1473,11 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
topology->nodes[i].num_links : node_num_links;
}
+/* populate the connected port num info if supported and available */
+if (ta_port_num_support && topology->nodes[i].num_links) {
+memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
+sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
+}
/* reflect the topology information for bi-directionality */
if (requires_reflection && topology->nodes[i].num_hops)

View File

@@ -150,6 +150,7 @@ struct psp_xgmi_node_info {
uint8_t is_sharing_enabled;
enum ta_xgmi_assigned_sdma_engine sdma_engine;
uint8_t num_links;
+struct xgmi_connected_port_num port_num[TA_XGMI__MAX_PORT_NUM];
};
struct psp_xgmi_topology_info {

View File

@@ -1188,7 +1188,7 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
}
if (block_obj->hw_ops->query_ras_error_count)
-block_obj->hw_ops->query_ras_error_count(adev, &err_data);
+block_obj->hw_ops->query_ras_error_count(adev, err_data);
if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
(info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
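
err_data is already a pointer parameter here, so the old call handed the callback the address of the local pointer; since the hw_ops callback takes its status argument as void *, the compiler could not flag the mismatch. A runnable illustration of why such bugs slip through (all names hypothetical):

    #include <stdio.h>

    struct err_data { unsigned long ue_count; };

    static void query_count(void *ras_error_status) /* void * hides the mismatch */
    {
        struct err_data *e = ras_error_status;
        e->ue_count++;
    }

    static void helper(struct err_data *err_data)
    {
        /* query_count(&err_data);  bug: address of the local pointer */
        query_count(err_data);      /* fixed: pass the caller's buffer itself */
    }

    int main(void)
    {
        struct err_data e = { 0 };
        helper(&e);
        printf("ue_count=%lu\n", e.ue_count); /* 1 */
        return 0;
    }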

View File

@@ -398,6 +398,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 * amdgpu_uvd_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
+ * @ring: amdgpu_ring pointer to check
 *
 * Initialize the entity used for handle management in the kernel driver.
 */

View File

@@ -230,6 +230,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 * amdgpu_vce_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
+ * @ring: amdgpu_ring pointer to check
 *
 * Initialize the entity used for handle management in the kernel driver.
 */

View File

@@ -675,7 +675,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_set_agp_default(adev, mc);
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
-if (!amdgpu_sriov_vf(adev))
+if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
amdgpu_gmc_agp_location(adev, mc);
/* base offset of vram pages */

View File

@@ -640,8 +640,9 @@ static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_set_agp_default(adev, mc);
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_HIGH);
-if (!amdgpu_sriov_vf(adev) ||
-(amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0)))
+if (!amdgpu_sriov_vf(adev) &&
+(amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0)) &&
+(amdgpu_agp == 1))
amdgpu_gmc_agp_location(adev, mc);
/* base offset of vram pages */

View File

@@ -1630,7 +1630,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
} else {
amdgpu_gmc_vram_location(adev, mc, base);
amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
-if (!amdgpu_sriov_vf(adev))
+if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
amdgpu_gmc_agp_location(adev, mc);
}
/* base offset of vram pages */
@@ -2170,8 +2170,6 @@ static int gmc_v9_0_sw_fini(void *handle)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
amdgpu_gmc_sysfs_fini(adev);
-adev->gmc.num_mem_partitions = 0;
-kfree(adev->gmc.mem_partitions);
amdgpu_gmc_ras_fini(adev);
amdgpu_gem_force_release(adev);
@@ -2185,6 +2183,9 @@ static int gmc_v9_0_sw_fini(void *handle)
amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
amdgpu_bo_fini(adev);
+
+adev->gmc.num_mem_partitions = 0;
+kfree(adev->gmc.mem_partitions);
return 0;
}

View File

@@ -130,6 +130,9 @@ static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
uint64_t value;
int i;
+
+if (amdgpu_sriov_vf(adev))
+return;
inst_mask = adev->aid_mask;
for_each_inst(i, inst_mask) {
/* Program the AGP BAR */
@@ -139,9 +142,6 @@ static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP,
adev->gmc.agp_end >> 24);
-
-if (amdgpu_sriov_vf(adev))
-return;
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

View File

@@ -2079,7 +2079,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
struct dmub_srv_create_params create_params;
struct dmub_srv_region_params region_params;
struct dmub_srv_region_info region_info;
-struct dmub_srv_fb_params fb_params;
+struct dmub_srv_memory_params memory_params;
struct dmub_srv_fb_info *fb_info;
struct dmub_srv *dmub_srv;
const struct dmcub_firmware_header_v1_0 *hdr;
@@ -2182,6 +2182,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
PSP_HEADER_BYTES;
+region_params.is_mailbox_in_inbox = false;
status = dmub_srv_calc_region_info(dmub_srv, &region_params,
&region_info);
@@ -2205,10 +2206,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
return r;
/* Rebase the regions on the framebuffer address. */
-memset(&fb_params, 0, sizeof(fb_params));
-fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
-fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
-fb_params.region_info = &region_info;
+memset(&memory_params, 0, sizeof(memory_params));
+memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
+memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
+memory_params.region_info = &region_info;
adev->dm.dmub_fb_info =
kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
@@ -2220,7 +2221,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
return -ENOMEM;
}
-status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
+status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
if (status != DMUB_STATUS_OK) {
DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
return -EINVAL;
@@ -7481,6 +7482,9 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
int i;
int result = -EIO;
+
+if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
+return result;
cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
if (!cmd.payloads)
@@ -9603,14 +9607,14 @@ static bool should_reset_plane(struct drm_atomic_state *state,
struct drm_plane *other;
struct drm_plane_state *old_other_state, *new_other_state;
struct drm_crtc_state *new_crtc_state;
+struct amdgpu_device *adev = drm_to_adev(plane->dev);
int i;
/*
- * TODO: Remove this hack once the checks below are sufficient
- * enough to determine when we need to reset all the planes on
- * the stream.
+ * TODO: Remove this hack for all asics once it proves that the
+ * fast updates works fine on DCN3.2+.
 */
-if (state->allow_modeset)
+if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
return true;
/* Exit early if we know that we're adding or removing the plane. */
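
The new gate compares packed IP versions numerically, so pre-DCN3.2 hardware keeps the allow_modeset hack while newer parts take the fast-update path. A self-contained model, assuming the usual amdgpu encoding of IP_VERSION as (major << 16 | minor << 8 | rev):

    #include <stdio.h>

    #define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

    int main(void)
    {
        unsigned int dce_hwip = IP_VERSION(3, 0, 1); /* e.g. a DCN3.0.1 part */

        if (dce_hwip < IP_VERSION(3, 2, 0))
            printf("pre-DCN3.2: reset planes on modeset\n");
        else
            printf("DCN3.2+: fast plane updates allowed\n");
        return 0;
    }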

View File

@@ -536,11 +536,8 @@ bool dm_helpers_dp_read_dpcd(
struct amdgpu_dm_connector *aconnector = link->priv;
-if (!aconnector) {
-drm_dbg_dp(aconnector->base.dev,
-"Failed to find connector for link!\n");
+if (!aconnector)
return false;
-}
return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data,
size) == size;
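
The removed branch logged through the very pointer it had just tested as NULL, one of the potential NULL pointer dereferences this pull advertises. A minimal reproduction of the pattern and its fix (illustrative types):

    #include <stdio.h>

    struct connector { const char *name; };

    static int read_dpcd(struct connector *aconnector)
    {
        if (!aconnector) {
            /* printf("%s missing\n", aconnector->name);  bug: NULL deref */
            return 0; /* fixed: fail without touching aconnector */
        }
        printf("reading via %s\n", aconnector->name);
        return 1;
    }

    int main(void)
    {
        read_dpcd(NULL);                    /* safe after the fix */
        struct connector c = { "DP-1" };
        return !read_dpcd(&c);
    }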

View File

@@ -1604,31 +1604,31 @@ enum dc_status dm_dp_mst_is_port_support_mode(
unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
unsigned int max_compressed_bw_in_kbps = 0;
struct dc_dsc_bw_range bw_range = {0};
-struct drm_dp_mst_topology_mgr *mst_mgr;
+uint16_t full_pbn = aconnector->mst_output_port->full_pbn;
/*
- * check if the mode could be supported if DSC pass-through is supported
- * AND check if there enough bandwidth available to support the mode
- * with DSC enabled.
+ * Consider the case with the depth of the mst topology tree is equal or less than 2
+ * A. When dsc bitstream can be transmitted along the entire path
+ *    1. dsc is possible between source and branch/leaf device (common dsc params is possible), AND
+ *    2. dsc passthrough supported at MST branch, or
+ *    3. dsc decoding supported at leaf MST device
+ *    Use maximum dsc compression as bw constraint
+ * B. When dsc bitstream cannot be transmitted along the entire path
+ *    Use native bw as bw constraint
 */
if (is_dsc_common_config_possible(stream, &bw_range) &&
-aconnector->mst_output_port->passthrough_aux) {
-mst_mgr = aconnector->mst_output_port->mgr;
-mutex_lock(&mst_mgr->lock);
+(aconnector->mst_output_port->passthrough_aux ||
+aconnector->dsc_aux == &aconnector->mst_output_port->aux)) {
cur_link_settings = stream->link->verified_link_cap;
upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
-&cur_link_settings
-);
-down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
+&cur_link_settings);
+down_link_bw_in_kbps = kbps_from_pbn(full_pbn);
/* pick the bottleneck */
end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
down_link_bw_in_kbps);
-mutex_unlock(&mst_mgr->lock);
/*
* use the maximum dsc compression bandwidth as the required
* bandwidth for the mode
@@ -1643,8 +1643,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
/* check if mode could be supported within full_pbn */
bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
-if (pbn > aconnector->mst_output_port->full_pbn)
+if (pbn > full_pbn)
return DC_FAIL_BANDWIDTH_VALIDATE;
}
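
The check picks the end-to-end bottleneck: the upstream link bandwidth versus the branch-to-leaf bandwidth derived from the cached full_pbn, then validates the mode against it. A stripped-down model with made-up numbers (kbps_from_pbn() does the real conversion in the driver):

    #include <stdio.h>

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned int upper_link_bw_in_kbps = 32000000; /* source to first branch */
        unsigned int down_link_bw_in_kbps = 8000000;   /* branch to leaf, from PBN */

        unsigned int end_to_end_bw_in_kbps =
            min_u(upper_link_bw_in_kbps, down_link_bw_in_kbps);
        printf("bottleneck: %u kbps\n", end_to_end_bw_in_kbps);
        return 0;
    }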

View File

@@ -820,22 +820,22 @@ static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle)
if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
-val |= DMUB_IPS1_ALLOW_MASK;
-val |= DMUB_IPS2_ALLOW_MASK;
-} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
val = val & ~DMUB_IPS1_ALLOW_MASK;
val = val & ~DMUB_IPS2_ALLOW_MASK;
-} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
-val |= DMUB_IPS1_ALLOW_MASK;
-val = val & ~DMUB_IPS2_ALLOW_MASK;
-} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
+} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
+val |= DMUB_IPS1_ALLOW_MASK;
+val |= DMUB_IPS2_ALLOW_MASK;
+} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
+val = val & ~DMUB_IPS1_ALLOW_MASK;
+val |= DMUB_IPS2_ALLOW_MASK;
+} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
val = val & ~DMUB_IPS1_ALLOW_MASK;
val = val & ~DMUB_IPS2_ALLOW_MASK;
}
if (!allow_idle) {
-val = val & ~DMUB_IPS1_ALLOW_MASK;
-val = val & ~DMUB_IPS2_ALLOW_MASK;
+val |= DMUB_IPS1_ALLOW_MASK;
+val |= DMUB_IPS2_ALLOW_MASK;
}
dcn35_smu_write_ips_scratch(clk_mgr, val);

View File

@@ -3178,7 +3178,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx,
context->streams[i]);
-if (otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
+if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
resource_build_test_pattern_params(&context->res_ctx, otg_master);
}
}
@@ -4934,8 +4934,8 @@ bool dc_dmub_is_ips_idle_state(struct dc *dc)
if (dc->hwss.get_idle_state)
idle_state = dc->hwss.get_idle_state(dc);
-if ((idle_state & DMUB_IPS1_ALLOW_MASK) ||
-(idle_state & DMUB_IPS2_ALLOW_MASK))
+if (!(idle_state & DMUB_IPS1_ALLOW_MASK) ||
+!(idle_state & DMUB_IPS2_ALLOW_MASK))
return true;
return false;
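
The inverted test reads "either allow bit clear", which by De Morgan's laws is exactly "not both bits set"; a quick self-check of that equivalence:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define IPS1 (1u << 0)
    #define IPS2 (1u << 1)

    int main(void)
    {
        for (uint32_t s = 0; s < 4; s++) {
            int either_clear = !(s & IPS1) || !(s & IPS2);
            int not_both_set = !((s & IPS1) && (s & IPS2));
            assert(either_clear == not_both_set);
        }
        printf("equivalence holds for all four bit states\n");
        return 0;
    }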

View File

@@ -5190,6 +5190,9 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy(
sec_next = sec_pipe->next_odm_pipe;
sec_prev = sec_pipe->prev_odm_pipe;
+
+if (pri_pipe == NULL)
+return false;
*sec_pipe = *pri_pipe;
sec_pipe->top_pipe = sec_top;

View File

@@ -1202,11 +1202,11 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
allow_state = dc->hwss.get_idle_state(dc);
dc->hwss.set_idle_state(dc, false);
-if (allow_state & DMUB_IPS2_ALLOW_MASK) {
+if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) {
// Wait for evaluation time
udelay(dc->debug.ips2_eval_delay_us);
commit_state = dc->hwss.get_idle_state(dc);
-if (commit_state & DMUB_IPS2_COMMIT_MASK) {
+if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) {
// Tell PMFW to exit low power state
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
@@ -1216,7 +1216,7 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
for (i = 0; i < max_num_polls; ++i) {
commit_state = dc->hwss.get_idle_state(dc);
-if (!(commit_state & DMUB_IPS2_COMMIT_MASK))
+if (commit_state & DMUB_IPS2_COMMIT_MASK)
break;
udelay(1);
@@ -1235,10 +1235,10 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
}
dc_dmub_srv_notify_idle(dc, false);
-if (allow_state & DMUB_IPS1_ALLOW_MASK) {
+if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) {
for (i = 0; i < max_num_polls; ++i) {
commit_state = dc->hwss.get_idle_state(dc);
-if (!(commit_state & DMUB_IPS1_COMMIT_MASK))
+if (commit_state & DMUB_IPS1_COMMIT_MASK)
break;
udelay(1);

View File

@@ -177,6 +177,7 @@ struct dc_panel_patch {
unsigned int disable_fams;
unsigned int skip_avmute;
unsigned int mst_start_top_delay;
+unsigned int remove_sink_ext_caps;
};
struct dc_edid_caps {

View File

@@ -261,12 +261,6 @@ static void enc35_stream_encoder_enable(
/* invalid mode ! */
ASSERT_CRITICAL(false);
}
-REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 1);
-REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 1);
-} else {
-REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 0);
-REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 0);
-}
}
@@ -436,6 +430,8 @@ static void enc35_disable_fifo(struct stream_encoder *enc)
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0);
+REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 0);
+REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 0);
}
static void enc35_enable_fifo(struct stream_encoder *enc)
@@ -443,6 +439,8 @@ static void enc35_enable_fifo(struct stream_encoder *enc)
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 1);
+REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 1);
enc35_reset_fifo(enc, true);
enc35_reset_fifo(enc, false);

View File

@@ -1088,6 +1088,9 @@ static bool detect_link_and_local_sink(struct dc_link *link,
if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
link->ctx->dc->debug.hdmi20_disable = true;
+
+if (sink->edid_caps.panel_patch.remove_sink_ext_caps)
+link->dpcd_sink_ext_caps.raw = 0;
if (dc_is_hdmi_signal(link->connector_signal))
read_scdc_caps(link->ddc, link->local_sink);

View File

@@ -195,6 +195,7 @@ struct dmub_srv_region_params {
uint32_t vbios_size;
const uint8_t *fw_inst_const;
const uint8_t *fw_bss_data;
+bool is_mailbox_in_inbox;
};
/**
@@ -214,20 +215,25 @@ struct dmub_srv_region_params {
 */
struct dmub_srv_region_info {
uint32_t fb_size;
+uint32_t inbox_size;
uint8_t num_regions;
struct dmub_region regions[DMUB_WINDOW_TOTAL];
};
/**
- * struct dmub_srv_fb_params - parameters used for driver fb setup
+ * struct dmub_srv_memory_params - parameters used for driver fb setup
 * @region_info: region info calculated by dmub service
- * @cpu_addr: base cpu address for the framebuffer
- * @gpu_addr: base gpu virtual address for the framebuffer
+ * @cpu_fb_addr: base cpu address for the framebuffer
+ * @cpu_inbox_addr: base cpu address for the gart
+ * @gpu_fb_addr: base gpu virtual address for the framebuffer
+ * @gpu_inbox_addr: base gpu virtual address for the gart
 */
-struct dmub_srv_fb_params {
+struct dmub_srv_memory_params {
const struct dmub_srv_region_info *region_info;
-void *cpu_addr;
-uint64_t gpu_addr;
+void *cpu_fb_addr;
+void *cpu_inbox_addr;
+uint64_t gpu_fb_addr;
+uint64_t gpu_inbox_addr;
};
/**
@@ -563,8 +569,8 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
 * DMUB_STATUS_OK - success
 * DMUB_STATUS_INVALID - unspecified error
 */
-enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
-const struct dmub_srv_fb_params *params,
+enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
+const struct dmub_srv_memory_params *params,
struct dmub_srv_fb_info *out);
/**

View File

@@ -434,7 +434,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
+uint32_t previous_top = 0;
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
@@ -459,8 +459,15 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
bios->base = dmub_align(stack->top, 256);
bios->top = bios->base + params->vbios_size;
-mail->base = dmub_align(bios->top, 256);
-mail->top = mail->base + DMUB_MAILBOX_SIZE;
+if (params->is_mailbox_in_inbox) {
+mail->base = 0;
+mail->top = mail->base + DMUB_MAILBOX_SIZE;
+previous_top = bios->top;
+} else {
+mail->base = dmub_align(bios->top, 256);
+mail->top = mail->base + DMUB_MAILBOX_SIZE;
+previous_top = mail->top;
+}
fw_info = dmub_get_fw_meta_info(params);
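
The reworked layout packs the DMUB windows back to back with 256-byte alignment; with the new is_mailbox_in_inbox flag the mailbox starts at offset 0 of its own buffer and the framebuffer packing continues from the bios top instead. A self-contained model of the packing arithmetic (sizes are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct region { uint32_t base, top; };

    static uint32_t align_up(uint32_t v, uint32_t a) /* a must be a power of two */
    {
        return (v + a - 1) & ~(a - 1);
    }

    int main(void)
    {
        struct region bios, mail, trace_buff;
        uint32_t previous_top;
        int is_mailbox_in_inbox = 1;

        bios.base = 0;
        bios.top = bios.base + 0x10000;

        if (is_mailbox_in_inbox) {
            mail.base = 0;                    /* own buffer, own offsets */
            mail.top = mail.base + 0x1000;
            previous_top = bios.top;          /* fb packing skips the mailbox */
        } else {
            mail.base = align_up(bios.top, 256);
            mail.top = mail.base + 0x1000;
            previous_top = mail.top;
        }

        trace_buff.base = align_up(previous_top, 256);
        trace_buff.top = trace_buff.base + 0x2000;
        printf("mail %#x..%#x, trace %#x..%#x\n",
               (unsigned)mail.base, (unsigned)mail.top,
               (unsigned)trace_buff.base, (unsigned)trace_buff.top);
        return 0;
    }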
@@ -479,7 +486,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
dmub->fw_version = fw_info->fw_version;
}
-trace_buff->base = dmub_align(mail->top, 256);
+trace_buff->base = dmub_align(previous_top, 256);
trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
fw_state->base = dmub_align(trace_buff->top, 256);
@@ -490,11 +497,14 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
out->fb_size = dmub_align(scratch_mem->top, 4096);
+
+if (params->is_mailbox_in_inbox)
+out->inbox_size = dmub_align(mail->top, 4096);
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
-const struct dmub_srv_fb_params *params,
+enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
+const struct dmub_srv_memory_params *params,
struct dmub_srv_fb_info *out)
{
uint8_t *cpu_base;
@@ -509,8 +519,8 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
return DMUB_STATUS_INVALID;
-cpu_base = (uint8_t *)params->cpu_addr;
-gpu_base = params->gpu_addr;
+cpu_base = (uint8_t *)params->cpu_fb_addr;
+gpu_base = params->gpu_fb_addr;
for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
const struct dmub_region *reg =
@@ -518,6 +528,12 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
out->fb[i].cpu_addr = cpu_base + reg->base;
out->fb[i].gpu_addr = gpu_base + reg->base;
+
+if (i == DMUB_WINDOW_4_MAILBOX && params->cpu_inbox_addr != 0) {
+out->fb[i].cpu_addr = (uint8_t *)params->cpu_inbox_addr + reg->base;
+out->fb[i].gpu_addr = params->gpu_inbox_addr + reg->base;
+}
out->fb[i].size = reg->top - reg->base;
}
@@ -707,9 +723,16 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
return DMUB_STATUS_INVALID;
if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
-dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
-dmub->inbox1_rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
-dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
+uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
+
+if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) {
+return DMUB_STATUS_HW_FAILURE;
+} else {
+dmub->inbox1_rb.rptr = rptr;
+dmub->inbox1_rb.wrpt = wptr;
+dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
+}
}
return DMUB_STATUS_OK;
@@ -743,6 +766,11 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
+
+if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
+dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
+return DMUB_STATUS_HW_FAILURE;
+}
if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
return DMUB_STATUS_OK;
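
Both hunks stop trusting hardware-reported ring pointers blindly: an out-of-range rptr or wptr now surfaces as DMUB_STATUS_HW_FAILURE instead of silently corrupting ring state. A minimal model of the same validation:

    #include <stdint.h>
    #include <stdio.h>

    struct ring { uint32_t rptr, wptr, capacity; };

    static int sync_ring(struct ring *rb, uint32_t hw_rptr, uint32_t hw_wptr)
    {
        if (hw_rptr > rb->capacity || hw_wptr > rb->capacity)
            return -1; /* treat out-of-range pointers as a hardware failure */
        rb->rptr = hw_rptr;
        rb->wptr = hw_wptr;
        return 0;
    }

    int main(void)
    {
        struct ring rb = { 0, 0, 4096 };
        printf("in range:  %d\n", sync_ring(&rb, 128, 256));       /* 0 */
        printf("corrupted: %d\n", sync_ring(&rb, 0xffffffffu, 0)); /* -1 */
        return 0;
    }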

View File

@@ -123,7 +123,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0x8
+#define SMU_METRICS_TABLE_VERSION 0x9
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -211,6 +211,14 @@ typedef struct __attribute__((packed, aligned(4))) {
//XGMI Data transfer size
uint64_t XgmiReadDataSizeAcc[8];//in KByte
uint64_t XgmiWriteDataSizeAcc[8];//in KByte
+
+//PCIE BW Data and error count
+uint32_t PcieBandwidth[4];
+uint32_t PCIeL0ToRecoveryCountAcc; // The Pcie counter itself is accumulated
+uint32_t PCIenReplayAAcc; // The Pcie counter itself is accumulated
+uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated
+uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated
+uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated
} MetricsTable_t;
#define SMU_VF_METRICS_TABLE_VERSION 0x3
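
Appending fields to a packed firmware table is only safe together with the version bump above (0x8 to 0x9), because older firmware never fills the new tail. A toy consumer-side sketch, not the real MetricsTable_t layout:

    #include <stdint.h>
    #include <stdio.h>

    #define METRICS_TABLE_VERSION 0x9

    struct __attribute__((packed, aligned(4))) metrics_table {
        uint32_t AccumulationCounter;
        uint32_t PcieBandwidth[4];         /* appended in v9 */
        uint32_t PCIeL0ToRecoveryCountAcc; /* appended in v9 */
    };

    static int read_pcie_fields(uint32_t version, const struct metrics_table *m)
    {
        if (version < 0x9)
            return -1; /* fields absent before v9; do not read them */
        printf("pcie bw[0]=%u\n", m->PcieBandwidth[0]);
        return 0;
    }

    int main(void)
    {
        struct metrics_table m = { 1, { 42, 0, 0, 0 }, 0 };
        return read_pcie_fields(METRICS_TABLE_VERSION, &m);
    }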

View File

@@ -1454,7 +1454,7 @@ static int smu_v13_0_6_register_irq_handler(struct smu_context *smu)
static int smu_v13_0_6_notify_unload(struct smu_context *smu)
{
-if (smu->smc_fw_version <= 0x553500)
+if (amdgpu_in_reset(smu->adev))
return 0;
dev_dbg(smu->adev->dev, "Notify PMFW about driver unload");
@@ -2095,6 +2095,14 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
smu_v13_0_6_get_current_pcie_link_speed(smu);
gpu_metrics->pcie_bandwidth_acc =
SMUQ10_ROUND(metrics->PcieBandwidthAcc[0]);
+gpu_metrics->pcie_bandwidth_inst =
+SMUQ10_ROUND(metrics->PcieBandwidth[0]);
+gpu_metrics->pcie_l0_to_recov_count_acc =
+metrics->PCIeL0ToRecoveryCountAcc;
+gpu_metrics->pcie_replay_count_acc =
+metrics->PCIenReplayAAcc;
+gpu_metrics->pcie_replay_rover_count_acc =
+metrics->PCIenReplayARolloverCountAcc;
}
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();