amd-drm-next-6.8-2023-12-08:

amdgpu:
- SR-IOV fixes
- DCN 3.5 updates
- Backlight fixes
- MST fixes
- DMCUB fixes
- DPIA fixes
- Display powergating updates
- Enable writeback connectors
- Misc code cleanups
- Add more register state debugging for aquavanjaram
- Suspend fix
- Clockgating fixes
- SMU 14 updates
- PSR fixes
- MES logging updates
- Misc fixes

amdkfd:
- SVM fix

radeon:
- Fix potential memory leaks in error paths

-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQQgO5Idg2tXNTSZAr293/aFa7yZ2AUCZXOBHwAKCRC93/aFa7yZ
2E9hAP9Dqv/NKWf9aMZ01msdOysScu7FmAsnFrXjR0Mx0zX4WQD/er2/UrSR146+
VDaxFbcjINoq0Q8b7CWLG4gy5Q9fDAo=
=JU7h
-----END PGP SIGNATURE-----

Merge tag 'amd-drm-next-6.8-2023-12-08' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.8-2023-12-08:

amdgpu:
- SR-IOV fixes
- DCN 3.5 updates
- Backlight fixes
- MST fixes
- DMCUB fixes
- DPIA fixes
- Display powergating updates
- Enable writeback connectors
- Misc code cleanups
- Add more register state debugging for aquavanjaram
- Suspend fix
- Clockgating fixes
- SMU 14 updates
- PSR fixes
- MES logging updates
- Misc fixes

amdkfd:
- SVM fix

radeon:
- Fix potential memory leaks in error paths

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231208205613.4861-1-alexander.deucher@amd.com
commit a0a28956b4
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -2147,6 +2147,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 	amdgpu_debugfs_firmware_init(adev);
 	amdgpu_ta_if_debugfs_init(adev);
 
+	amdgpu_debugfs_mes_event_log_init(adev);
+
 #if defined(CONFIG_DRM_AMD_DC)
 	if (adev->dc_enabled)
 		dtn_debugfs_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -32,3 +32,5 @@ void amdgpu_debugfs_fini(struct amdgpu_device *adev);
 void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
 void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
 void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
+
+void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -98,6 +98,26 @@ static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
+{
+	int r;
+
+	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
+				    AMDGPU_GEM_DOMAIN_GTT,
+				    &adev->mes.event_log_gpu_obj,
+				    &adev->mes.event_log_gpu_addr,
+				    &adev->mes.event_log_cpu_addr);
+	if (r) {
+		dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
+		return r;
+	}
+
+	memset(adev->mes.event_log_cpu_addr, 0, PAGE_SIZE);
+
+	return 0;
+
+}
+
 static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
 {
 	bitmap_free(adev->mes.doorbell_bitmap);
@@ -182,8 +202,14 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
 	if (r)
 		goto error;
 
+	r = amdgpu_mes_event_log_init(adev);
+	if (r)
+		goto error_doorbell;
+
 	return 0;
 
+error_doorbell:
+	amdgpu_mes_doorbell_free(adev);
 error:
 	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
 	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
@@ -199,6 +225,10 @@ error_ids:
 
 void amdgpu_mes_fini(struct amdgpu_device *adev)
 {
+	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
+			      &adev->mes.event_log_gpu_addr,
+			      &adev->mes.event_log_cpu_addr);
+
 	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
 	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
 	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
@@ -1479,3 +1509,34 @@ out:
 	amdgpu_ucode_release(&adev->mes.fw[pipe]);
 	return r;
 }
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
+{
+	struct amdgpu_device *adev = m->private;
+	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
+
+	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
+		     mem, PAGE_SIZE, false);
+
+	return 0;
+}
+
+
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);
+
+#endif
+
+void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
+{
+
+#if defined(CONFIG_DEBUG_FS)
+	struct drm_minor *minor = adev_to_drm(adev)->primary;
+	struct dentry *root = minor->debugfs_root;
+
+	debugfs_create_file("amdgpu_mes_event_log", 0444, root,
+			    adev, &amdgpu_debugfs_mes_event_log_fops);
+
+#endif
+}
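The hunk above exposes the MES event log through debugfs as a raw hex dump. A minimal userspace sketch for reading it follows; the dri/0 path and root access are assumptions for illustration, not part of the patch:

	/* Sketch: dump the MES event log exposed by the patch above.
	 * The path assumes DRM minor 0; adjust for your card. Requires
	 * CONFIG_DEBUG_FS and a mounted debugfs. */
	#include <stdio.h>

	int main(void)
	{
		const char *path = "/sys/kernel/debug/dri/0/amdgpu_mes_event_log";
		FILE *f = fopen(path, "r");
		char line[256];

		if (!f) {
			perror("fopen");	/* typically needs root */
			return 1;
		}
		while (fgets(line, sizeof(line), f))	/* seq_hex_dump() output */
			fputs(line, stdout);
		fclose(f);
		return 0;
	}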
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -133,6 +133,11 @@ struct amdgpu_mes {
 	uint32_t			num_mes_dbs;
 	unsigned long			*doorbell_bitmap;
 
+	/* MES event log buffer */
+	struct amdgpu_bo		*event_log_gpu_obj;
+	uint64_t			event_log_gpu_addr;
+	void				*event_log_cpu_addr;
+
 	/* ip specific functions */
 	const struct amdgpu_mes_funcs	*funcs;
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -416,6 +416,10 @@ struct amdgpu_crtc {
 
 	int otg_inst;
 	struct drm_pending_vblank_event *event;
+
+	bool wb_pending;
+	bool wb_enabled;
+	struct drm_writeback_connector *wb_conn;
 };
 
 struct amdgpu_encoder_atom_dig {
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -848,6 +848,198 @@ static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
 	return xgmi_reg_state->common_header.structure_size;
 }
 
+#define smnreg_0x11C00070	0x11C00070
+#define smnreg_0x11C00210	0x11C00210
+
+static struct aqua_reg_list wafl_reg_addrs[] = {
+	{ smnreg_0x11C00070, 4, DW_ADDR_INCR },
+	{ smnreg_0x11C00210, 1, 0 },
+};
+
+#define WAFL_LINK_REG(smnreg, l) ((smnreg) | (l << 20))
+
+#define NUM_WAFL_SMN_REGS 5
+
+static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
+					     void *buf, size_t max_size)
+{
+	struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
+	uint32_t start_addr, incrx, num_regs, szbuf;
+	struct amdgpu_regs_wafl_v1_0 *wafl_regs;
+	struct amdgpu_smn_reg_data *reg_data;
+	const int max_wafl_instances = 8;
+	int inst = 0, i, j, r, n;
+	const int wafl_inst = 2;
+	void *p;
+
+	if (!buf || !max_size)
+		return -EINVAL;
+
+	wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;
+
+	szbuf = sizeof(*wafl_reg_state) +
+		amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
+				    NUM_WAFL_SMN_REGS);
+
+	if (max_size < szbuf)
+		return -EOVERFLOW;
+
+	p = &wafl_reg_state->wafl_state_regs[0];
+	for_each_inst(i, adev->aid_mask) {
+		for (j = 0; j < wafl_inst; ++j) {
+			wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
+			wafl_regs->inst_header.instance = inst++;
+
+			wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
+			wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;
+
+			reg_data = wafl_regs->smn_reg_values;
+
+			for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
+				start_addr = wafl_reg_addrs[r].start_addr;
+				incrx = wafl_reg_addrs[r].incrx;
+				num_regs = wafl_reg_addrs[r].num_regs;
+				for (n = 0; n < num_regs; n++) {
+					aqua_read_smn_ext(
+						adev, reg_data,
+						WAFL_LINK_REG(start_addr, j) +
+							n * incrx,
+						i);
+					++reg_data;
+				}
+			}
+			p = reg_data;
+		}
+	}
+
+	wafl_reg_state->common_header.structure_size = szbuf;
+	wafl_reg_state->common_header.format_revision = 1;
+	wafl_reg_state->common_header.content_revision = 0;
+	wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
+	wafl_reg_state->common_header.num_instances = max_wafl_instances;
+
+	return wafl_reg_state->common_header.structure_size;
+}
+
+#define smnreg_0x1B311060 0x1B311060
+#define smnreg_0x1B411060 0x1B411060
+#define smnreg_0x1B511060 0x1B511060
+#define smnreg_0x1B611060 0x1B611060
+
+#define smnreg_0x1C307120 0x1C307120
+#define smnreg_0x1C317120 0x1C317120
+
+#define smnreg_0x1C320830 0x1C320830
+#define smnreg_0x1C380830 0x1C380830
+#define smnreg_0x1C3D0830 0x1C3D0830
+#define smnreg_0x1C420830 0x1C420830
+
+#define smnreg_0x1C320100 0x1C320100
+#define smnreg_0x1C380100 0x1C380100
+#define smnreg_0x1C3D0100 0x1C3D0100
+#define smnreg_0x1C420100 0x1C420100
+
+#define smnreg_0x1B310500 0x1B310500
+#define smnreg_0x1C300400 0x1C300400
+
+#define USR_CAKE_INCR 0x11000
+#define USR_LINK_INCR 0x100000
+#define USR_CP_INCR 0x10000
+
+#define NUM_USR_SMN_REGS 20
+
+struct aqua_reg_list usr_reg_addrs[] = {
+	{ smnreg_0x1B311060, 4, DW_ADDR_INCR },
+	{ smnreg_0x1B411060, 4, DW_ADDR_INCR },
+	{ smnreg_0x1B511060, 4, DW_ADDR_INCR },
+	{ smnreg_0x1B611060, 4, DW_ADDR_INCR },
+	{ smnreg_0x1C307120, 2, DW_ADDR_INCR },
+	{ smnreg_0x1C317120, 2, DW_ADDR_INCR },
+};
+
+#define NUM_USR1_SMN_REGS 46
+struct aqua_reg_list usr1_reg_addrs[] = {
+	{ smnreg_0x1C320830, 6, USR_CAKE_INCR },
+	{ smnreg_0x1C380830, 5, USR_CAKE_INCR },
+	{ smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
+	{ smnreg_0x1C420830, 4, USR_CAKE_INCR },
+	{ smnreg_0x1C320100, 6, USR_CAKE_INCR },
+	{ smnreg_0x1C380100, 5, USR_CAKE_INCR },
+	{ smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
+	{ smnreg_0x1C420100, 4, USR_CAKE_INCR },
+	{ smnreg_0x1B310500, 4, USR_LINK_INCR },
+	{ smnreg_0x1C300400, 2, USR_CP_INCR },
+};
+
+static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
+					    void *buf, size_t max_size,
+					    int reg_state)
+{
+	uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
+	struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
+	struct amdgpu_regs_usr_v1_0 *usr_regs;
+	struct amdgpu_smn_reg_data *reg_data;
+	const int max_usr_instances = 4;
+	struct aqua_reg_list *reg_addrs;
+	int inst = 0, i, n, r, arr_size;
+	void *p;
+
+	if (!buf || !max_size)
+		return -EINVAL;
+
+	switch (reg_state) {
+	case AMDGPU_REG_STATE_TYPE_USR:
+		arr_size = ARRAY_SIZE(usr_reg_addrs);
+		reg_addrs = usr_reg_addrs;
+		num_smn = NUM_USR_SMN_REGS;
+		break;
+	case AMDGPU_REG_STATE_TYPE_USR_1:
+		arr_size = ARRAY_SIZE(usr1_reg_addrs);
+		reg_addrs = usr1_reg_addrs;
+		num_smn = NUM_USR1_SMN_REGS;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;
+
+	szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
+							     sizeof(*usr_regs),
+							     num_smn);
+	if (max_size < szbuf)
+		return -EOVERFLOW;
+
+	p = &usr_reg_state->usr_state_regs[0];
+	for_each_inst(i, adev->aid_mask) {
+		usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
+		usr_regs->inst_header.instance = inst++;
+		usr_regs->inst_header.state = AMDGPU_INST_S_OK;
+		usr_regs->inst_header.num_smn_regs = num_smn;
+		reg_data = usr_regs->smn_reg_values;
+
+		for (r = 0; r < arr_size; r++) {
+			start_addr = reg_addrs[r].start_addr;
+			incrx = reg_addrs[r].incrx;
+			num_regs = reg_addrs[r].num_regs;
+			for (n = 0; n < num_regs; n++) {
+				aqua_read_smn_ext(adev, reg_data,
+						  start_addr + n * incrx, i);
+				reg_data++;
+			}
+		}
+		p = reg_data;
+	}
+
+	usr_reg_state->common_header.structure_size = szbuf;
+	usr_reg_state->common_header.format_revision = 1;
+	usr_reg_state->common_header.content_revision = 0;
+	usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
+	usr_reg_state->common_header.num_instances = max_usr_instances;
+
+	return usr_reg_state->common_header.structure_size;
+}
+
 ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
 				    enum amdgpu_reg_state reg_state, void *buf,
 				    size_t max_size)
@@ -861,6 +1053,17 @@ ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
 	case AMDGPU_REG_STATE_TYPE_XGMI:
 		size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
 		break;
+	case AMDGPU_REG_STATE_TYPE_WAFL:
+		size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
+		break;
+	case AMDGPU_REG_STATE_TYPE_USR:
+		size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
+						    AMDGPU_REG_STATE_TYPE_USR);
+		break;
+	case AMDGPU_REG_STATE_TYPE_USR_1:
+		size = aqua_vanjaram_read_usr_state(
+			adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
+		break;
 	default:
 		return -EINVAL;
 	}
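The register-state readers above size their output as a fixed header plus per-instance payloads. A sketch of the arithmetic amdgpu_reginst_size() has to perform; the struct stand-ins and the macro body below are assumptions inferred from the call sites, not copied from the patch:

	#include <stddef.h>
	#include <stdio.h>

	/* Stand-ins for the driver structs; field layout is illustrative only. */
	struct amdgpu_smn_reg_data { unsigned long long addr; unsigned int value; };
	struct amdgpu_regs_wafl_v1_0 {
		unsigned int inst_header[4];
		struct amdgpu_smn_reg_data smn_reg_values[];
	};

	/* Assumed shape of amdgpu_reginst_size(): each instance contributes its
	 * header struct plus one register-data record per SMN register. */
	#define amdgpu_reginst_size(max_inst, inst_size, max_regs)	\
		((max_inst) * ((inst_size) +				\
			       (max_regs) * sizeof(struct amdgpu_smn_reg_data)))

	int main(void)
	{
		/* 8 WAFL instances, 5 SMN registers each, as in the patch */
		size_t sz = amdgpu_reginst_size(8,
				sizeof(struct amdgpu_regs_wafl_v1_0), 5);

		printf("per-instance payload bytes: %zu\n", sz);
		return 0;
	}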
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -408,6 +408,8 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
 	mes_set_hw_res_pkt.enable_reg_active_poll = 1;
 	mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
 	mes_set_hw_res_pkt.oversubscription_timer = 50;
+	mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
+	mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
 
 	return mes_v11_0_submit_pkt_and_poll_completion(mes,
 			&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -100,6 +100,31 @@ static int vcn_v4_0_early_init(void *handle)
 	return amdgpu_vcn_early_init(adev);
 }
 
+static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
+{
+	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+
+	fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+	fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+	fw_shared->sq.is_enabled = 1;
+
+	fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
+	fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
+		AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
+
+	if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==
+	    IP_VERSION(4, 0, 2)) {
+		fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
+		fw_shared->drm_key_wa.method =
+			AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
+	}
+
+	if (amdgpu_vcnfw_log)
+		amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
+
+	return 0;
+}
+
 /**
  * vcn_v4_0_sw_init - sw init for VCN block
  *
@@ -124,8 +149,6 @@ static int vcn_v4_0_sw_init(void *handle)
 		return r;
 
 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
-		volatile struct amdgpu_vcn4_fw_shared *fw_shared;
-
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
 
@@ -161,23 +184,7 @@ static int vcn_v4_0_sw_init(void *handle)
 		if (r)
 			return r;
 
-		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
-		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
-		fw_shared->sq.is_enabled = 1;
-
-		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
-		fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
-			AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
-
-		if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==
-		    IP_VERSION(4, 0, 2)) {
-			fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
-			fw_shared->drm_key_wa.method =
-				AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
-		}
-
-		if (amdgpu_vcnfw_log)
-			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+		vcn_v4_0_fw_shared_init(adev, i);
 	}
 
 	if (amdgpu_sriov_vf(adev)) {
@@ -1273,6 +1280,9 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
 
+		// Must re/init fw_shared at beginning
+		vcn_v4_0_fw_shared_init(adev, i);
+
 		table_size = 0;
 
 		MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -442,10 +442,10 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
 		goto out_free;
 	}
 	if (cpages != npages)
-		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+		pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
 			 cpages, npages);
 	else
-		pr_debug("0x%lx pages migrated\n", cpages);
+		pr_debug("0x%lx pages collected\n", cpages);
 
 	r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
 	migrate_vma_pages(&migrate);
@@ -479,6 +479,8 @@ out:
 * svm_migrate_ram_to_vram - migrate svm range from system to device
 * @prange: range structure
 * @best_loc: the device to migrate to
+ * @start_mgr: start page to migrate
+ * @last_mgr: last page to migrate
 * @mm: the process mm structure
 * @trigger: reason of migration
 *
@@ -489,6 +491,7 @@ out:
 */
 static int
 svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
+			unsigned long start_mgr, unsigned long last_mgr,
 			struct mm_struct *mm, uint32_t trigger)
 {
 	unsigned long addr, start, end;
@@ -498,10 +501,10 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
 	unsigned long cpages = 0;
 	long r = 0;
 
-	if (prange->actual_loc == best_loc) {
-		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
-			 prange->svms, prange->start, prange->last, best_loc);
-		return 0;
+	if (start_mgr < prange->start || last_mgr > prange->last) {
+		pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
+			 start_mgr, last_mgr, prange->start, prange->last);
+		return -EFAULT;
 	}
 
 	node = svm_range_get_node_by_id(prange, best_loc);
@@ -510,18 +513,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
 		return -ENODEV;
 	}
 
-	pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
-		 prange->start, prange->last, best_loc);
+	pr_debug("svms 0x%p [0x%lx 0x%lx] in [0x%lx 0x%lx] to gpu 0x%x\n",
+		 prange->svms, start_mgr, last_mgr, prange->start, prange->last,
+		 best_loc);
 
-	start = prange->start << PAGE_SHIFT;
-	end = (prange->last + 1) << PAGE_SHIFT;
+	start = start_mgr << PAGE_SHIFT;
+	end = (last_mgr + 1) << PAGE_SHIFT;
 
 	r = svm_range_vram_node_new(node, prange, true);
 	if (r) {
 		dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
 		return r;
 	}
-	ttm_res_offset = prange->offset << PAGE_SHIFT;
+	ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;
 
 	for (addr = start; addr < end;) {
 		unsigned long next;
@@ -544,8 +548,11 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
 
 	if (cpages) {
 		prange->actual_loc = best_loc;
-		svm_range_dma_unmap(prange);
-	} else {
+		prange->vram_pages = prange->vram_pages + cpages;
+	} else if (!prange->actual_loc) {
+		/* if no page migrated and all pages from prange are at
+		 * sys ram drop svm_bo got from svm_range_vram_node_new
+		 */
 		svm_range_vram_node_free(prange);
 	}
 
@@ -663,9 +670,8 @@ out_oom:
 * Context: Process context, caller hold mmap read lock, prange->migrate_mutex
 *
 * Return:
- *   0 - success with all pages migrated
 *   negative values - indicate error
- *   positive values - partial migration, number of pages not migrated
+ *   positive values or zero - number of pages got migrated
 */
 static long
 svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
@@ -676,6 +682,7 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
 	uint64_t npages = (end - start) >> PAGE_SHIFT;
 	unsigned long upages = npages;
 	unsigned long cpages = 0;
+	unsigned long mpages = 0;
 	struct amdgpu_device *adev = node->adev;
 	struct kfd_process_device *pdd;
 	struct dma_fence *mfence = NULL;
@@ -725,10 +732,10 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
 		goto out_free;
 	}
 	if (cpages != npages)
-		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+		pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
 			 cpages, npages);
 	else
-		pr_debug("0x%lx pages migrated\n", cpages);
+		pr_debug("0x%lx pages collected\n", cpages);
 
 	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
 				    scratch, npages);
@@ -751,17 +758,21 @@ out_free:
 	kvfree(buf);
 out:
 	if (!r && cpages) {
+		mpages = cpages - upages;
 		pdd = svm_range_get_pdd_by_node(prange, node);
 		if (pdd)
-			WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
+			WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
 	}
-	return r ? r : upages;
+
+	return r ? r : mpages;
 }
 
 /**
 * svm_migrate_vram_to_ram - migrate svm range from device to system
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
+ * @start_mgr: start page need be migrated to sys ram
+ * @last_mgr: last page need be migrated to sys ram
 * @trigger: reason of migration
 * @fault_page: is from vmf->page, svm_migrate_to_ram(), this is CPU page fault callback
 *
@@ -771,6 +782,7 @@ out:
 *   0 - OK, otherwise error code
 */
 int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
+			    unsigned long start_mgr, unsigned long last_mgr,
 			    uint32_t trigger, struct page *fault_page)
 {
 	struct kfd_node *node;
@@ -778,26 +790,33 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
 	unsigned long addr;
 	unsigned long start;
 	unsigned long end;
-	unsigned long upages = 0;
+	unsigned long mpages = 0;
 	long r = 0;
 
+	/* this pragne has no any vram page to migrate to sys ram */
 	if (!prange->actual_loc) {
 		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
 			 prange->start, prange->last);
 		return 0;
 	}
 
+	if (start_mgr < prange->start || last_mgr > prange->last) {
+		pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
+			 start_mgr, last_mgr, prange->start, prange->last);
+		return -EFAULT;
+	}
+
 	node = svm_range_get_node_by_id(prange, prange->actual_loc);
 	if (!node) {
 		pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc);
 		return -ENODEV;
 	}
 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
-		 prange->svms, prange, prange->start, prange->last,
+		 prange->svms, prange, start_mgr, last_mgr,
 		 prange->actual_loc);
 
-	start = prange->start << PAGE_SHIFT;
-	end = (prange->last + 1) << PAGE_SHIFT;
+	start = start_mgr << PAGE_SHIFT;
+	end = (last_mgr + 1) << PAGE_SHIFT;
 
 	for (addr = start; addr < end;) {
 		unsigned long next;
@@ -816,14 +835,21 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
 			pr_debug("failed %ld to migrate prange %p\n", r, prange);
 			break;
 		} else {
-			upages += r;
+			mpages += r;
 		}
 		addr = next;
 	}
 
-	if (r >= 0 && !upages) {
-		svm_range_vram_node_free(prange);
-		prange->actual_loc = 0;
+	if (r >= 0) {
+		prange->vram_pages -= mpages;
+
+		/* prange does not have vram page set its actual_loc to system
+		 * and drop its svm_bo ref
+		 */
+		if (prange->vram_pages == 0 && prange->ttm_res) {
+			prange->actual_loc = 0;
+			svm_range_vram_node_free(prange);
+		}
 	}
 
 	return r < 0 ? r : 0;
@@ -833,17 +859,23 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
+ * @start: start page need be migrated to sys ram
+ * @last: last page need be migrated to sys ram
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason of migration
 *
 * Context: Process context, caller hold mmap read lock, svms lock, prange lock
 *
+ * migrate all vram pages in prange to sys ram, then migrate
+ * [start, last] pages from sys ram to gpu node best_loc.
+ *
 * Return:
 *   0 - OK, otherwise error code
 */
 static int
 svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
-			 struct mm_struct *mm, uint32_t trigger)
+			 unsigned long start, unsigned long last,
+			 struct mm_struct *mm, uint32_t trigger)
 {
 	int r, retries = 3;
@@ -855,7 +887,8 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
 	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);
 
 	do {
-		r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
+		r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
+					    trigger, NULL);
 		if (r)
 			return r;
 	} while (prange->actual_loc && --retries);
@@ -863,17 +896,21 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
 	if (prange->actual_loc)
 		return -EDEADLK;
 
-	return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
+	return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger);
 }
 
 int
 svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
+		    unsigned long start, unsigned long last,
 		    struct mm_struct *mm, uint32_t trigger)
 {
-	if (!prange->actual_loc)
-		return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
+	if (!prange->actual_loc || prange->actual_loc == best_loc)
+		return svm_migrate_ram_to_vram(prange, best_loc, start, last,
+					       mm, trigger);
+
 	else
-		return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger);
+		return svm_migrate_vram_to_vram(prange, best_loc, start, last,
+						mm, trigger);
+
 }
@@ -889,10 +926,9 @@ svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
 */
 static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
 {
+	unsigned long start, last, size;
 	unsigned long addr = vmf->address;
 	struct svm_range_bo *svm_bo;
-	enum svm_work_list_ops op;
-	struct svm_range *parent;
 	struct svm_range *prange;
 	struct kfd_process *p;
 	struct mm_struct *mm;
@@ -929,51 +965,31 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
 
 	mutex_lock(&p->svms.lock);
 
-	prange = svm_range_from_addr(&p->svms, addr, &parent);
+	prange = svm_range_from_addr(&p->svms, addr, NULL);
 	if (!prange) {
 		pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
 		r = -EFAULT;
 		goto out_unlock_svms;
 	}
 
-	mutex_lock(&parent->migrate_mutex);
-	if (prange != parent)
-		mutex_lock_nested(&prange->migrate_mutex, 1);
+	mutex_lock(&prange->migrate_mutex);
 
 	if (!prange->actual_loc)
 		goto out_unlock_prange;
 
-	svm_range_lock(parent);
-	if (prange != parent)
-		mutex_lock_nested(&prange->lock, 1);
-	r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
-	if (prange != parent)
-		mutex_unlock(&prange->lock);
-	svm_range_unlock(parent);
-	if (r) {
-		pr_debug("failed %d to split range by granularity\n", r);
-		goto out_unlock_prange;
-	}
+	/* Align migration range start and size to granularity size */
+	size = 1UL << prange->granularity;
+	start = max(ALIGN_DOWN(addr, size), prange->start);
+	last = min(ALIGN(addr + 1, size) - 1, prange->last);
 
-	r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
-				    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
-				    vmf->page);
+	r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last,
+				    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page);
 	if (r)
 		pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
-			 r, prange->svms, prange, prange->start, prange->last);
-
-	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
-	if (p->xnack_enabled && parent == prange)
-		op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
-	else
-		op = SVM_OP_UPDATE_RANGE_NOTIFIER;
-	svm_range_add_list_work(&p->svms, parent, mm, op);
-	schedule_deferred_list_work(&p->svms);
+			 r, prange->svms, prange, start, last);
 
 out_unlock_prange:
-	if (prange != parent)
-		mutex_unlock(&prange->migrate_mutex);
-	mutex_unlock(&parent->migrate_mutex);
+	mutex_unlock(&prange->migrate_mutex);
 out_unlock_svms:
 	mutex_unlock(&p->svms.lock);
 out_unref_process:
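The CPU-fault path above now migrates a single granularity-aligned window around the faulting address instead of splitting the range. A standalone sketch of that arithmetic, with the kernel macros approximated for illustration (they require a power-of-two size, as here):

	/* Sketch of the granularity window above; all values are page numbers. */
	#include <stdio.h>

	#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
	#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
	#define MAX(a, b)		((a) > (b) ? (a) : (b))
	#define MIN(a, b)		((a) < (b) ? (a) : (b))

	int main(void)
	{
		unsigned long addr = 0x12345;		/* faulting page number */
		unsigned long range_start = 0x12000, range_last = 0x12fff;
		unsigned long size = 1UL << 9;		/* prange->granularity = 9 */
		unsigned long start = MAX(ALIGN_DOWN(addr, size), range_start);
		unsigned long last = MIN(ALIGN(addr + 1, size) - 1, range_last);

		/* prints [0x12200 0x123ff]: one 512-page window around the fault */
		printf("[0x%lx 0x%lx]\n", start, last);
		return 0;
	}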
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
@@ -41,9 +41,13 @@ enum MIGRATION_COPY_DIR {
 };
 
 int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
+			unsigned long start, unsigned long last,
 			struct mm_struct *mm, uint32_t trigger);
+
 int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
+			    unsigned long start, unsigned long last,
 			    uint32_t trigger, struct page *fault_page);
+
 unsigned long
 svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -158,12 +158,13 @@ svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
 static int
 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
 		      unsigned long offset, unsigned long npages,
-		      unsigned long *hmm_pfns, uint32_t gpuidx)
+		      unsigned long *hmm_pfns, uint32_t gpuidx, uint64_t *vram_pages)
 {
 	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
 	dma_addr_t *addr = prange->dma_addr[gpuidx];
 	struct device *dev = adev->dev;
 	struct page *page;
+	uint64_t vram_pages_dev;
 	int i, r;
 
 	if (!addr) {
@@ -173,6 +174,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
 		prange->dma_addr[gpuidx] = addr;
 	}
 
+	vram_pages_dev = 0;
 	addr += offset;
 	for (i = 0; i < npages; i++) {
 		if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
@@ -182,6 +184,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
 		if (is_zone_device_page(page)) {
 			struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
 
+			vram_pages_dev++;
 			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
 				   bo_adev->vm_manager.vram_base_offset -
 				   bo_adev->kfd.pgmap.range.start;
@@ -198,13 +201,14 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
 		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
 				     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
 	}
+	*vram_pages = vram_pages_dev;
 	return 0;
 }
 
 static int
 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
 		  unsigned long offset, unsigned long npages,
-		  unsigned long *hmm_pfns)
+		  unsigned long *hmm_pfns, uint64_t *vram_pages)
 {
 	struct kfd_process *p;
 	uint32_t gpuidx;
@@ -223,7 +227,7 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
 		}
 
 		r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
-					  hmm_pfns, gpuidx);
+					  hmm_pfns, gpuidx, vram_pages);
 		if (r)
 			break;
 	}
@@ -349,6 +353,7 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
 	INIT_LIST_HEAD(&prange->child_list);
 	atomic_set(&prange->invalid, 0);
 	prange->validate_timestamp = 0;
+	prange->vram_pages = 0;
 	mutex_init(&prange->migrate_mutex);
 	mutex_init(&prange->lock);
 
@@ -395,6 +400,8 @@ static void svm_range_bo_release(struct kref *kref)
 			 prange->start, prange->last);
 		mutex_lock(&prange->lock);
 		prange->svm_bo = NULL;
+		/* prange should not hold vram page now */
+		WARN_ONCE(prange->actual_loc, "prange should not hold vram page");
 		mutex_unlock(&prange->lock);
 
 		spin_lock(&svm_bo->list_lock);
|
||||
new->svm_bo = svm_range_bo_ref(old->svm_bo);
|
||||
new->ttm_res = old->ttm_res;
|
||||
|
||||
/* set new's vram_pages as old range's now, the acurate vram_pages
|
||||
* will be updated during mapping
|
||||
*/
|
||||
new->vram_pages = min(old->vram_pages, new->npages);
|
||||
|
||||
spin_lock(&new->svm_bo->list_lock);
|
||||
list_add(&new->svm_bo_list, &new->svm_bo->range_list);
|
||||
spin_unlock(&new->svm_bo->list_lock);
|
||||
@ -1135,66 +1147,6 @@ svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
|
||||
list_add_tail(&pchild->child_list, &prange->child_list);
|
||||
}
|
||||
|
||||
/**
|
||||
* svm_range_split_by_granularity - collect ranges within granularity boundary
|
||||
*
|
||||
* @p: the process with svms list
|
||||
* @mm: mm structure
|
||||
* @addr: the vm fault address in pages, to split the prange
|
||||
* @parent: parent range if prange is from child list
|
||||
* @prange: prange to split
|
||||
*
|
||||
* Trims @prange to be a single aligned block of prange->granularity if
|
||||
* possible. The head and tail are added to the child_list in @parent.
|
||||
*
|
||||
* Context: caller must hold mmap_read_lock and prange->lock
|
||||
*
|
||||
* Return:
|
||||
* 0 - OK, otherwise error code
|
||||
*/
|
||||
int
|
||||
svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
|
||||
unsigned long addr, struct svm_range *parent,
|
||||
struct svm_range *prange)
|
||||
{
|
||||
struct svm_range *head, *tail;
|
||||
unsigned long start, last, size;
|
||||
int r;
|
||||
|
||||
/* Align splited range start and size to granularity size, then a single
|
||||
* PTE will be used for whole range, this reduces the number of PTE
|
||||
* updated and the L1 TLB space used for translation.
|
||||
*/
|
||||
size = 1UL << prange->granularity;
|
||||
start = ALIGN_DOWN(addr, size);
|
||||
last = ALIGN(addr + 1, size) - 1;
|
||||
|
||||
pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
|
||||
prange->svms, prange->start, prange->last, start, last, size);
|
||||
|
||||
if (start > prange->start) {
|
||||
r = svm_range_split(prange, start, prange->last, &head);
|
||||
if (r)
|
||||
return r;
|
||||
svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
|
||||
}
|
||||
|
||||
if (last < prange->last) {
|
||||
r = svm_range_split(prange, prange->start, last, &tail);
|
||||
if (r)
|
||||
return r;
|
||||
svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
|
||||
}
|
||||
|
||||
/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
|
||||
if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
|
||||
prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
|
||||
pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
|
||||
prange, prange->start, prange->last,
|
||||
SVM_OP_ADD_RANGE_AND_MAP);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
static bool
|
||||
svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
|
||||
{
|
||||
@@ -1614,12 +1566,14 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
 * 5. Release page table (and SVM BO) reservation
 */
 static int svm_range_validate_and_map(struct mm_struct *mm,
+				      unsigned long map_start, unsigned long map_last,
 				      struct svm_range *prange, int32_t gpuidx,
 				      bool intr, bool wait, bool flush_tlb)
 {
 	struct svm_validate_context *ctx;
 	unsigned long start, end, addr;
 	struct kfd_process *p;
+	uint64_t vram_pages;
 	void *owner;
 	int32_t idx;
 	int r = 0;
@@ -1688,11 +1642,15 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
 		}
 	}
 
+	vram_pages = 0;
 	start = prange->start << PAGE_SHIFT;
 	end = (prange->last + 1) << PAGE_SHIFT;
 	for (addr = start; !r && addr < end; ) {
 		struct hmm_range *hmm_range;
+		unsigned long map_start_vma;
+		unsigned long map_last_vma;
 		struct vm_area_struct *vma;
+		uint64_t vram_pages_vma;
 		unsigned long next = 0;
 		unsigned long offset;
 		unsigned long npages;
@@ -1721,9 +1679,11 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
 		if (!r) {
 			offset = (addr - start) >> PAGE_SHIFT;
 			r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
-					      hmm_range->hmm_pfns);
+					      hmm_range->hmm_pfns, &vram_pages_vma);
 			if (r)
 				pr_debug("failed %d to dma map range\n", r);
+			else
+				vram_pages += vram_pages_vma;
 		}
 
 		svm_range_lock(prange);
@@ -1737,9 +1697,16 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
 			r = -EAGAIN;
 		}
 
-		if (!r)
-			r = svm_range_map_to_gpus(prange, offset, npages, readonly,
-						  ctx->bitmap, wait, flush_tlb);
+		if (!r) {
+			map_start_vma = max(map_start, prange->start + offset);
+			map_last_vma = min(map_last, prange->start + offset + npages - 1);
+			if (map_start_vma <= map_last_vma) {
+				offset = map_start_vma - prange->start;
+				npages = map_last_vma - map_start_vma + 1;
+				r = svm_range_map_to_gpus(prange, offset, npages, readonly,
+							  ctx->bitmap, wait, flush_tlb);
+			}
+		}
 
 		if (!r && next == end)
 			prange->mapped_to_gpu = true;
@@ -1749,6 +1716,19 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
 		addr = next;
 	}
 
+	if (addr == end) {
+		prange->vram_pages = vram_pages;
+
+		/* if prange does not include any vram page and it
+		 * has not released svm_bo drop its svm_bo reference
+		 * and set its actaul_loc to sys ram
+		 */
+		if (!vram_pages && prange->ttm_res) {
+			prange->actual_loc = 0;
+			svm_range_vram_node_free(prange);
+		}
+	}
+
 	svm_range_unreserve_bos(ctx);
 	if (!r)
 		prange->validate_timestamp = ktime_get_boottime();
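The mapping loop above clamps the requested window [map_start, map_last] against each validated VMA chunk before calling svm_range_map_to_gpus(). A sketch of that intersection with hypothetical helper and parameter names (not from the patch):

	/* Sketch of the per-VMA clamp above; all values are page numbers.
	 * Returns 1 if the clamped window is non-empty. */
	static int clamp_map_window(unsigned long map_start, unsigned long map_last,
				    unsigned long range_start, unsigned long offset,
				    unsigned long npages,
				    unsigned long *out_offset, unsigned long *out_npages)
	{
		unsigned long chunk_first = range_start + offset;
		unsigned long chunk_last = range_start + offset + npages - 1;
		unsigned long first = map_start > chunk_first ? map_start : chunk_first;
		unsigned long last = map_last < chunk_last ? map_last : chunk_last;

		if (first > last)
			return 0;	/* nothing from this chunk needs mapping */
		*out_offset = first - range_start;
		*out_npages = last - first + 1;
		return 1;
	}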
@@ -1832,8 +1812,8 @@ static void svm_range_restore_work(struct work_struct *work)
 		 */
 		mutex_lock(&prange->migrate_mutex);
 
-		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
-					       false, true, false);
+		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
+					       MAX_GPU_INSTANCE, false, true, false);
 		if (r)
 			pr_debug("failed %d to map 0x%lx to gpus\n", r,
 				 prange->start);
@@ -2001,6 +1981,7 @@ static struct svm_range *svm_range_clone(struct svm_range *old)
 	new->actual_loc = old->actual_loc;
 	new->granularity = old->granularity;
 	new->mapped_to_gpu = old->mapped_to_gpu;
+	new->vram_pages = old->vram_pages;
 	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
 	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
 
@@ -2908,6 +2889,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 			uint32_t vmid, uint32_t node_id,
 			uint64_t addr, bool write_fault)
 {
+	unsigned long start, last, size;
 	struct mm_struct *mm = NULL;
 	struct svm_range_list *svms;
 	struct svm_range *prange;
@@ -3043,40 +3025,44 @@ retry_write_locked:
 	kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
 				       write_fault, timestamp);
 
-	if (prange->actual_loc != best_loc) {
+	/* Align migration range start and size to granularity size */
+	size = 1UL << prange->granularity;
+	start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
+	last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
+	if (prange->actual_loc != 0 || best_loc != 0) {
 		migration = true;
 
 		if (best_loc) {
-			r = svm_migrate_to_vram(prange, best_loc, mm,
-					KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
+			r = svm_migrate_to_vram(prange, best_loc, start, last,
+					mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
 			if (r) {
 				pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
 					 r, addr);
 				/* Fallback to system memory if migration to
 				 * VRAM failed
 				 */
-				if (prange->actual_loc)
-					r = svm_migrate_vram_to_ram(prange, mm,
-					   KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
-					   NULL);
+				if (prange->actual_loc && prange->actual_loc != best_loc)
+					r = svm_migrate_vram_to_ram(prange, mm, start, last,
+						KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
 				else
 					r = 0;
 			}
 		} else {
-			r = svm_migrate_vram_to_ram(prange, mm,
-					KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
-					NULL);
+			r = svm_migrate_vram_to_ram(prange, mm, start, last,
+					KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
 		}
 		if (r) {
 			pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
-				 r, svms, prange->start, prange->last);
+				 r, svms, start, last);
 			goto out_unlock_range;
 		}
 	}
 
-	r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
+	r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
+				       false, false);
 	if (r)
 		pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
-			 r, svms, prange->start, prange->last);
+			 r, svms, start, last);
 
 	kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
 				     migration);
@@ -3422,18 +3408,24 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
 	*migrated = false;
 	best_loc = svm_range_best_prefetch_location(prange);
 
-	if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
-	    best_loc == prange->actual_loc)
+	/* when best_loc is a gpu node and same as prange->actual_loc
+	 * we still need do migration as prange->actual_loc !=0 does
+	 * not mean all pages in prange are vram. hmm migrate will pick
+	 * up right pages during migration.
+	 */
+	if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) ||
+	    (best_loc == 0 && prange->actual_loc == 0))
 		return 0;
 
 	if (!best_loc) {
-		r = svm_migrate_vram_to_ram(prange, mm,
+		r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
 					    KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
 		*migrated = !r;
 		return r;
 	}
 
-	r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
+	r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last,
+				mm, KFD_MIGRATE_TRIGGER_PREFETCH);
 	*migrated = !r;
 
 	return r;
@@ -3488,7 +3480,11 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
 
 		mutex_lock(&prange->migrate_mutex);
 		do {
+			/* migrate all vram pages in this prange to sys ram
+			 * after that prange->actual_loc should be zero
+			 */
 			r = svm_migrate_vram_to_ram(prange, mm,
+					prange->start, prange->last,
 					KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
 		} while (!r && prange->actual_loc && --retries);
@@ -3612,8 +3608,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
 
 		flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
 
-		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
-					       true, true, flush_tlb);
+		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
+					       MAX_GPU_INSTANCE, true, true, flush_tlb);
 		if (r)
 			pr_debug("failed %d to map svm range\n", r);
 
@@ -3627,8 +3623,8 @@ out_unlock_range:
 			pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
 				 prange, prange->start, prange->last);
 			mutex_lock(&prange->migrate_mutex);
-			r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
-						       true, true, prange->mapped_to_gpu);
+			r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
+						       MAX_GPU_INSTANCE, true, true, prange->mapped_to_gpu);
 			if (r)
 				pr_debug("failed %d on remap svm range\n", r);
 			mutex_unlock(&prange->migrate_mutex);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -78,6 +78,7 @@ struct svm_work_list_item {
 * @update_list:link list node used to add to update_list
 * @mapping:    bo_va mapping structure to create and update GPU page table
 * @npages:     number of pages
+ * @vram_pages: vram pages number in this svm_range
 * @dma_addr:   dma mapping address on each GPU for system memory physical page
 * @ttm_res:    vram ttm resource map
 * @offset:     range start offset within mm_nodes
@@ -88,7 +89,9 @@ struct svm_work_list_item {
 * @flags:      flags defined as KFD_IOCTL_SVM_FLAG_*
 * @perferred_loc: perferred location, 0 for CPU, or GPU id
 * @perfetch_loc: last prefetch location, 0 for CPU, or GPU id
- * @actual_loc: the actual location, 0 for CPU, or GPU id
+ * @actual_loc: this svm_range location. 0: all pages are from sys ram;
+ *              GPU id: this svm_range may include vram pages from GPU with
+ *              id actual_loc.
 * @granularity:migration granularity, log2 num pages
 * @invalid:    not 0 means cpu page table is invalidated
 * @validate_timestamp: system timestamp when range is validated
@@ -112,6 +115,7 @@ struct svm_range {
 	struct list_head		list;
 	struct list_head		update_list;
 	uint64_t			npages;
+	uint64_t			vram_pages;
 	dma_addr_t			*dma_addr[MAX_GPU_INSTANCE];
 	struct ttm_resource		*ttm_res;
 	uint64_t			offset;
@@ -168,9 +172,6 @@ struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange,
 int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
 			    bool clear);
 void svm_range_vram_node_free(struct svm_range *prange);
-int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
-				   unsigned long addr, struct svm_range *parent,
-				   struct svm_range *prange);
 int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 			    uint32_t vmid, uint32_t node_id, uint64_t addr,
 			    bool write_fault);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -25,22 +25,25 @@
 
 
 
+ifneq ($(CONFIG_DRM_AMD_DC),)
 AMDGPUDM = \
 	amdgpu_dm.o \
 	amdgpu_dm_plane.o \
 	amdgpu_dm_crtc.o \
 	amdgpu_dm_irq.o \
 	amdgpu_dm_mst_types.o \
-	amdgpu_dm_color.o
+	amdgpu_dm_color.o \
+	amdgpu_dm_services.o \
+	amdgpu_dm_helpers.o \
+	amdgpu_dm_pp_smu.o \
+	amdgpu_dm_psr.o \
+	amdgpu_dm_replay.o \
+	amdgpu_dm_wb.o
 
 ifdef CONFIG_DRM_AMD_DC_FP
 AMDGPUDM += dc_fpu.o
 endif
 
-ifneq ($(CONFIG_DRM_AMD_DC),)
-AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o amdgpu_dm_replay.o
-endif
-
 AMDGPUDM += amdgpu_dm_hdcp.o
 
 ifneq ($(CONFIG_DEBUG_FS),)
@@ -52,3 +55,4 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
 AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))
 
 AMD_DISPLAY_FILES += $(AMDGPU_DM)
+endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -54,6 +54,7 @@
 #include "amdgpu_dm_crtc.h"
 #include "amdgpu_dm_hdcp.h"
 #include <drm/display/drm_hdcp_helper.h>
+#include "amdgpu_dm_wb.h"
 #include "amdgpu_pm.h"
 #include "amdgpu_atombios.h"
@@ -576,6 +577,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
 {
 	struct common_irq_params *irq_params = interrupt_params;
 	struct amdgpu_device *adev = irq_params->adev;
+	struct drm_writeback_job *job;
 	struct amdgpu_crtc *acrtc;
 	unsigned long flags;
 	int vrr_active;
@@ -584,6 +586,33 @@ static void dm_crtc_high_irq(void *interrupt_params)
 	if (!acrtc)
 		return;
 
+	if (acrtc->wb_pending) {
+		if (acrtc->wb_conn) {
+			spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);
+			job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
+						       struct drm_writeback_job,
+						       list_entry);
+			spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
+
+			if (job) {
+				unsigned int v_total, refresh_hz;
+				struct dc_stream_state *stream = acrtc->dm_irq_params.stream;
+
+				v_total = stream->adjust.v_total_max ?
+					  stream->adjust.v_total_max : stream->timing.v_total;
+				refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz *
+						     100LL, (v_total * stream->timing.h_total));
+				mdelay(1000 / refresh_hz);
+
+				drm_writeback_signal_completion(acrtc->wb_conn, 0);
+				dc_stream_fc_disable_writeback(adev->dm.dc,
+							       acrtc->dm_irq_params.stream, 0);
+			}
+		} else
+			DRM_ERROR("%s: no amdgpu_crtc wb_conn\n", __func__);
+		acrtc->wb_pending = false;
+	}
+
 	vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
 
 	drm_dbg_vbl(adev_to_drm(adev),
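The writeback path above waits roughly one frame before signalling job completion, so the DWB engine can finish scanning out. A standalone sketch of the refresh-rate arithmetic, with illustrative (not patch-derived) timing values:

	/* Sketch: one-frame delay used above. pix_clk_100hz is the pixel
	 * clock in units of 100 Hz, so multiplying by 100 yields Hz. */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t pix_clk_100hz = 5940000;	/* 594 MHz, e.g. 4k60 HDMI */
		uint32_t v_total = 2250, h_total = 4400;
		unsigned int refresh_hz =
			(uint64_t)pix_clk_100hz * 100 / ((uint64_t)v_total * h_total);

		/* 594000000 / (2250 * 4400) = 60 -> delay 1000/60 = 16 ms */
		printf("refresh %u Hz, delay %u ms\n", refresh_hz, 1000 / refresh_hz);
		return 0;
	}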
@@ -726,6 +755,10 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
 
 	drm_connector_list_iter_begin(dev, &iter);
 	drm_for_each_connector_iter(connector, &iter) {
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+			continue;
+
 		aconnector = to_amdgpu_dm_connector(connector);
 		if (link && aconnector->dc_link == link) {
 			if (notify->type == DMUB_NOTIFICATION_HPD)
@@ -949,6 +982,10 @@ static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
 
 	drm_connector_list_iter_begin(dev, &conn_iter);
 	drm_for_each_connector_iter(connector, &conn_iter) {
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+			continue;
+
 		aconnector = to_amdgpu_dm_connector(connector);
 		if (aconnector->audio_inst != port)
 			continue;
@@ -1674,6 +1711,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
 	init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
 
+	/* Enable DWB for tested platforms only */
+	if (adev->ip_versions[DCE_HWIP][0] >= IP_VERSION(3, 0, 0))
+		init_data.num_virtual_links = 1;
+
 	INIT_LIST_HEAD(&adev->dm.da_list);
 
 	retrieve_dmi_info(&adev->dm);
@@ -2251,6 +2292,10 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
 
 	drm_connector_list_iter_begin(dev, &iter);
 	drm_for_each_connector_iter(connector, &iter) {
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+			continue;
+
 		aconnector = to_amdgpu_dm_connector(connector);
 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
 		    aconnector->mst_mgr.aux) {
@@ -2379,6 +2424,10 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
 
 	drm_connector_list_iter_begin(dev, &iter);
 	drm_for_each_connector_iter(connector, &iter) {
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+			continue;
+
 		aconnector = to_amdgpu_dm_connector(connector);
 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
 		    aconnector->mst_root)
@@ -2642,7 +2691,7 @@ static int dm_suspend(void *handle)
 	return 0;
 }
 
-struct amdgpu_dm_connector *
+struct drm_connector *
 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
 					     struct drm_crtc *crtc)
 {
@@ -2655,7 +2704,7 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
 		crtc_from_state = new_con_state->crtc;
 
 		if (crtc_from_state == crtc)
-			return to_amdgpu_dm_connector(connector);
+			return connector;
 	}
 
 	return NULL;
||||
@ -2900,6 +2949,10 @@ static int dm_resume(void *handle)
|
||||
/* Do detection*/
|
||||
drm_connector_list_iter_begin(ddev, &iter);
|
||||
drm_for_each_connector_iter(connector, &iter) {
|
||||
|
||||
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
|
||||
continue;
|
||||
|
||||
aconnector = to_amdgpu_dm_connector(connector);
|
||||
|
||||
if (!aconnector->dc_link)
|
||||
@@ -3473,6 +3526,9 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
 	list_for_each_entry(connector,
 			&dev->mode_config.connector_list, head) {
 
+		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+			continue;
+
 		aconnector = to_amdgpu_dm_connector(connector);
 		dc_link = aconnector->dc_link;
 
||||
@ -4464,6 +4520,28 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
|
||||
continue;
|
||||
}
|
||||
|
||||
link = dc_get_link_at_index(dm->dc, i);
|
||||
|
||||
if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) {
|
||||
struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
|
||||
|
||||
if (!wbcon) {
|
||||
DRM_ERROR("KMS: Failed to allocate writeback connector\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
|
||||
DRM_ERROR("KMS: Failed to initialize writeback connector\n");
|
||||
kfree(wbcon);
|
||||
continue;
|
||||
}
|
||||
|
||||
link->psr_settings.psr_feature_enabled = false;
|
||||
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
|
||||
if (!aconnector)
|
||||
goto fail;
|
||||
@@ -4482,8 +4560,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 			goto fail;
 		}
 
-		link = dc_get_link_at_index(dm->dc, i);
-
 		if (!dc_link_detect_connection_type(link, &new_connection_type))
 			DRM_ERROR("KMS: Failed to detect connector\n");
 
@@ -5164,6 +5240,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
 		return;
 
+	if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
+		goto ffu;
+
 	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
 	clips = drm_plane_get_damage_clips(new_plane_state);
 
@@ -5490,10 +5569,13 @@ static void fill_stream_properties_from_drm_display_mode(
 {
 	struct dc_crtc_timing *timing_out = &stream->timing;
 	const struct drm_display_info *info = &connector->display_info;
-	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+	struct amdgpu_dm_connector *aconnector = NULL;
 	struct hdmi_vendor_infoframe hv_frame;
 	struct hdmi_avi_infoframe avi_frame;
 
+	if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+		aconnector = to_amdgpu_dm_connector(connector);
+
 	memset(&hv_frame, 0, sizeof(hv_frame));
 	memset(&avi_frame, 0, sizeof(avi_frame));
 
@@ -5506,6 +5588,7 @@ static void fill_stream_properties_from_drm_display_mode(
 		 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
 	else if (drm_mode_is_420_also(info, mode_in)
+		 && aconnector
 		 && aconnector->force_yuv420_output)
 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
|
||||
timing_out->hdmi_vic = hv_frame.vic;
|
||||
}
|
||||
|
||||
if (is_freesync_video_mode(mode_in, aconnector)) {
|
||||
if (aconnector && is_freesync_video_mode(mode_in, aconnector)) {
|
||||
timing_out->h_addressable = mode_in->hdisplay;
|
||||
timing_out->h_total = mode_in->htotal;
|
||||
timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
|
||||
@ -5662,13 +5745,13 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
}

static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
create_fake_sink(struct dc_link *link)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct dc_sink *sink = NULL;

sink_init_data.link = aconnector->dc_link;
sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
sink_init_data.link = link;
sink_init_data.sink_signal = link->connector_signal;

sink = dc_sink_create(&sink_init_data);
if (!sink) {

@ -6018,14 +6101,14 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
}

static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
create_stream_for_sink(struct drm_connector *connector,
const struct drm_display_mode *drm_mode,
const struct dm_connector_state *dm_state,
const struct dc_stream_state *old_stream,
int requested_bpc)
{
struct amdgpu_dm_connector *aconnector = NULL;
struct drm_display_mode *preferred_mode = NULL;
struct drm_connector *drm_connector;
const struct drm_connector_state *con_state = &dm_state->base;
struct dc_stream_state *stream = NULL;
struct drm_display_mode mode;
@ -6039,22 +6122,35 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
struct dsc_dec_dpcd_caps dsc_caps;

struct dc_link *link = NULL;
struct dc_sink *sink = NULL;

drm_mode_init(&mode, drm_mode);
memset(&saved_mode, 0, sizeof(saved_mode));

if (aconnector == NULL) {
DRM_ERROR("aconnector is NULL!\n");
if (connector == NULL) {
DRM_ERROR("connector is NULL!\n");
return stream;
}

drm_connector = &aconnector->base;
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
aconnector = NULL;
aconnector = to_amdgpu_dm_connector(connector);
link = aconnector->dc_link;
} else {
struct drm_writeback_connector *wbcon = NULL;
struct amdgpu_dm_wb_connector *dm_wbcon = NULL;

if (!aconnector->dc_sink) {
sink = create_fake_sink(aconnector);
wbcon = drm_connector_to_writeback(connector);
dm_wbcon = to_amdgpu_dm_wb_connector(wbcon);
link = dm_wbcon->link;
}

if (!aconnector || !aconnector->dc_sink) {
sink = create_fake_sink(link);
if (!sink)
return stream;

} else {
sink = aconnector->dc_sink;
dc_sink_retain(sink);
@ -6067,12 +6163,13 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
goto finish;
}

/* We leave this NULL for writeback connectors */
stream->dm_stream_context = aconnector;

stream->timing.flags.LTE_340MCSC_SCRAMBLE =
drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
connector->display_info.hdmi.scdc.scrambling.low_rates;

list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
list_for_each_entry(preferred_mode, &connector->modes, head) {
/* Search for preferred mode */
if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
native_mode_found = true;

@ -6081,7 +6178,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
}
if (!native_mode_found)
preferred_mode = list_first_entry_or_null(
&aconnector->base.modes,
&connector->modes,
struct drm_display_mode,
head);

@ -6095,7 +6192,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
* and the modelist may not be filled in time.
*/
DRM_DEBUG_DRIVER("No preferred mode found\n");
} else {
} else if (aconnector) {
recalculate_timing = is_freesync_video_mode(&mode, aconnector);
if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
@ -6118,13 +6215,17 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
*/
if (!scale || mode_refresh != preferred_refresh)
fill_stream_properties_from_drm_display_mode(
stream, &mode, &aconnector->base, con_state, NULL,
stream, &mode, connector, con_state, NULL,
requested_bpc);
else
fill_stream_properties_from_drm_display_mode(
stream, &mode, &aconnector->base, con_state, old_stream,
stream, &mode, connector, con_state, old_stream,
requested_bpc);

/* The rest isn't needed for writeback connectors */
if (!aconnector)
goto finish;

if (aconnector->timing_changed) {
drm_dbg(aconnector->base.dev,
"overriding timing for automated test, bpc %d, changing to %d\n",

@ -6142,7 +6243,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,

fill_audio_info(
&stream->audio_info,
drm_connector,
connector,
sink);

update_stream_signal(stream, sink);
@ -6610,7 +6711,7 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
enum dc_status dc_result = DC_OK;

do {
stream = create_stream_for_sink(aconnector, drm_mode,
stream = create_stream_for_sink(connector, drm_mode,
dm_state, old_stream,
requested_bpc);
if (stream == NULL) {

@ -6618,6 +6719,9 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
break;
}

if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
return stream;

dc_result = dc_validate_stream(adev->dm.dc, stream);
if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);

@ -6938,6 +7042,9 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,

for_each_new_connector_in_state(state, connector, new_con_state, i) {

if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;

aconnector = to_amdgpu_dm_connector(connector);

if (!aconnector->mst_output_port)

@ -7543,6 +7650,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
struct dc_link *link = dc_get_link_at_index(dc, link_index);
struct amdgpu_i2c_adapter *i2c;

/* Not needed for writeback connector */
link->priv = aconnector;
@ -8497,6 +8605,9 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;

if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;

notify:
aconnector = to_amdgpu_dm_connector(connector);

@ -8530,6 +8641,9 @@ notify:
if (!status)
continue;

if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;

aconnector = to_amdgpu_dm_connector(connector);

mutex_lock(&adev->dm.audio_lock);

@ -8555,6 +8669,12 @@ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_stat
stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}

static void dm_clear_writeback(struct amdgpu_display_manager *dm,
struct dm_crtc_state *crtc_state)
{
dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0);
}

static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
struct dc_state *dc_state)
{
@ -8564,9 +8684,38 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
struct drm_connector_state *old_con_state;
struct drm_connector *connector;
bool mode_set_reset_required = false;
u32 i;

/* Disable writeback */
for_each_old_connector_in_state(state, connector, old_con_state, i) {
struct dm_connector_state *dm_old_con_state;
struct amdgpu_crtc *acrtc;

if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
continue;

old_crtc_state = NULL;

dm_old_con_state = to_dm_connector_state(old_con_state);
if (!dm_old_con_state->base.crtc)
continue;

acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc);
if (acrtc)
old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);

if (!acrtc->wb_enabled)
continue;

dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

dm_clear_writeback(dm, dm_old_crtc_state);
acrtc->wb_enabled = false;
}

for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
@ -8703,6 +8852,103 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
}
}

static void dm_set_writeback(struct amdgpu_display_manager *dm,
struct dm_crtc_state *crtc_state,
struct drm_connector *connector,
struct drm_connector_state *new_con_state)
{
struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector);
struct amdgpu_device *adev = dm->adev;
struct amdgpu_crtc *acrtc;
struct dc_writeback_info *wb_info;
struct pipe_ctx *pipe = NULL;
struct amdgpu_framebuffer *afb;
int i = 0;

wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
if (!wb_info) {
DRM_ERROR("Failed to allocate wb_info\n");
return;
}

acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
if (!acrtc) {
DRM_ERROR("no amdgpu_crtc found\n");
return;
}

afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
if (!afb) {
DRM_ERROR("No amdgpu_framebuffer found\n");
return;
}

for (i = 0; i < MAX_PIPES; i++) {
if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) {
pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i];
break;
}
}

/* fill in wb_info */
wb_info->wb_enabled = true;

wb_info->dwb_pipe_inst = 0;
wb_info->dwb_params.dwbscl_black_color = 0;
wb_info->dwb_params.hdr_mult = 0x1F000;
wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS;
wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13;
wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC;
wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC;

/* width & height from crtc */
wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay;
wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay;
wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay;
wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay;

wb_info->dwb_params.cnv_params.crop_en = false;
wb_info->dwb_params.stereo_params.stereo_enabled = false;

wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff; // 10 bits
wb_info->dwb_params.cnv_params.out_min_pix_val = 0;
wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB;
wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS;

wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444;

wb_info->dwb_params.capture_rate = dwb_capture_rate_0;

wb_info->dwb_params.scaler_taps.h_taps = 4;
wb_info->dwb_params.scaler_taps.v_taps = 4;
wb_info->dwb_params.scaler_taps.h_taps_c = 2;
wb_info->dwb_params.scaler_taps.v_taps_c = 2;
wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING;

wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0];
wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1];

for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) {
wb_info->mcif_buf_params.luma_address[i] = afb->address;
wb_info->mcif_buf_params.chroma_address[i] = 0;
}

wb_info->mcif_buf_params.p_vmid = 1;
if (adev->ip_versions[DCE_HWIP][0] >= IP_VERSION(3, 0, 0)) {
wb_info->mcif_warmup_params.start_address.quad_part = afb->address;
wb_info->mcif_warmup_params.region_size =
wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height;
}
wb_info->mcif_warmup_params.p_vmid = 1;
wb_info->writeback_source_plane = pipe->plane_state;

dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info);

acrtc->wb_pending = true;
acrtc->wb_conn = wb_conn;
drm_writeback_queue_job(wb_conn, new_con_state);
}
/**
* amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
* @state: The atomic state to commit

@ -8753,7 +8999,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
struct amdgpu_dm_connector *aconnector;

if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;

aconnector = to_amdgpu_dm_connector(connector);

if (!adev->dm.hdcp_workqueue)
continue;

@ -9030,6 +9281,31 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
}

/* Enable writeback */
for_each_new_connector_in_state(state, connector, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
continue;

if (!new_con_state->writeback_job)
continue;

new_crtc_state = NULL;

if (acrtc)
new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

if (acrtc->wb_enabled)
continue;

dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state);
acrtc->wb_enabled = true;
}

/* Update audio instances for each connector. */
amdgpu_dm_commit_audio(dev, state);
@ -9147,10 +9423,15 @@ out:
void dm_restore_drm_connector_state(struct drm_device *dev,
struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
struct amdgpu_dm_connector *aconnector;
struct amdgpu_crtc *disconnected_acrtc;
struct dm_crtc_state *acrtc_state;

if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
return;

aconnector = to_amdgpu_dm_connector(connector);

if (!aconnector->dc_sink || !connector->state || !connector->encoder)
return;

@ -9227,12 +9508,16 @@ static void get_freesync_config_for_crtc(
struct dm_connector_state *new_con_state)
{
struct mod_freesync_config config = {0};
struct amdgpu_dm_connector *aconnector =
to_amdgpu_dm_connector(new_con_state->base.connector);
struct amdgpu_dm_connector *aconnector;
struct drm_display_mode *mode = &new_crtc_state->base.mode;
int vrefresh = drm_mode_vrefresh(mode);
bool fs_vid_mode = false;

if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
return;

aconnector = to_amdgpu_dm_connector(new_con_state->base.connector);

new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
vrefresh >= aconnector->min_vfreq &&
vrefresh <= aconnector->max_vfreq;
@ -9332,6 +9617,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
* update changed items
*/
struct amdgpu_crtc *acrtc = NULL;
struct drm_connector *connector = NULL;
struct amdgpu_dm_connector *aconnector = NULL;
struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

@ -9341,15 +9627,17 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
acrtc = to_amdgpu_crtc(crtc);
aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
if (connector)
aconnector = to_amdgpu_dm_connector(connector);

/* TODO This hack should go away */
if (aconnector && enable) {
if (connector && enable) {
/* Make sure fake sink is created in plug-in scenario */
drm_new_conn_state = drm_atomic_get_new_connector_state(state,
&aconnector->base);
connector);
drm_old_conn_state = drm_atomic_get_old_connector_state(state,
&aconnector->base);
connector);

if (IS_ERR(drm_new_conn_state)) {
ret = PTR_ERR_OR_ZERO(drm_new_conn_state);

@ -9496,7 +9784,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
* added MST connectors not found in existing crtc_state in the chained mode
* TODO: need to dig out the root cause of that
*/
if (!aconnector)
if (!connector)
goto skip_modeset;

if (modereset_required(new_crtc_state))

@ -9539,7 +9827,7 @@ skip_modeset:
* We want to do dc stream updates that do not require a
* full modeset below.
*/
if (!(enable && aconnector && new_crtc_state->active))
if (!(enable && connector && new_crtc_state->active))
return 0;
/*
* Given above conditions, the dc state cannot be NULL because:

@ -10062,6 +10350,9 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
if (conn_state->crtc != crtc)
continue;

if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;

aconnector = to_amdgpu_dm_connector(connector);
if (!aconnector->mst_output_port || !aconnector->mst_root)
aconnector = NULL;
@ -32,6 +32,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_plane.h>
#include "link_service_types.h"
#include <drm/drm_writeback.h>

/*
* This file contains the definition for amdgpu_display_manager

@ -714,6 +715,13 @@ static inline void amdgpu_dm_set_mst_status(uint8_t *status,

#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)

struct amdgpu_dm_wb_connector {
struct drm_writeback_connector base;
struct dc_link *link;
};

#define to_amdgpu_dm_wb_connector(x) container_of(x, struct amdgpu_dm_wb_connector, base)

extern const struct amdgpu_ip_block_version dm_ip_block;

struct dm_plane_state {

@ -834,7 +842,7 @@ struct dc_stream_state *
int dm_atomic_get_state(struct drm_atomic_state *state,
struct dm_atomic_state **dm_state);

struct amdgpu_dm_connector *
struct drm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
struct drm_crtc *crtc);
@ -326,6 +326,9 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
if (!connector->state || connector->state->crtc != crtc)
continue;

if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;

aconn = to_amdgpu_dm_connector(connector);
break;
}
@ -894,10 +894,15 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)

drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
struct amdgpu_dm_connector *amdgpu_dm_connector;
const struct dc_link *dc_link;

const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;

amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

dc_link = amdgpu_dm_connector->dc_link;

if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
dc_interrupt_set(adev->dm.dc,

@ -930,9 +935,14 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)

drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
struct amdgpu_dm_connector *amdgpu_dm_connector;
const struct dc_link *dc_link;

if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;

amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
dc_link = amdgpu_dm_connector->dc_link;

if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
dc_interrupt_set(adev->dm.dc,
@ -1500,14 +1500,16 @@ int pre_validate_dsc(struct drm_atomic_state *state,
int ind = find_crtc_index_in_state_by_stream(state, stream);

if (ind >= 0) {
struct drm_connector *connector;
struct amdgpu_dm_connector *aconnector;
struct drm_connector_state *drm_new_conn_state;
struct dm_connector_state *dm_new_conn_state;
struct dm_crtc_state *dm_old_crtc_state;

aconnector =
connector =
amdgpu_dm_find_first_crtc_matching_connector(state,
state->crtcs[ind].ptr);
aconnector = to_amdgpu_dm_connector(connector);
drm_new_conn_state =
drm_atomic_get_new_connector_state(state,
&aconnector->base);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c (new file, 215 lines)
@ -0,0 +1,215 @@
// SPDX-License-Identifier: MIT
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/

#include "dm_services_types.h"

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_wb.h"
#include "amdgpu_display.h"
#include "dc.h"

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_modeset_helper_vtables.h>

static const u32 amdgpu_dm_wb_formats[] = {
DRM_FORMAT_XRGB2101010,
};

static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_framebuffer *fb;
const struct drm_display_mode *mode = &crtc_state->mode;
bool found = false;
uint8_t i;

if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
return 0;

fb = conn_state->writeback_job->fb;
if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) {
DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
fb->width, fb->height);
return -EINVAL;
}

for (i = 0; i < sizeof(amdgpu_dm_wb_formats) / sizeof(u32); i++) {
if (fb->format->format == amdgpu_dm_wb_formats[i])
found = true;
}

if (!found) {
DRM_DEBUG_KMS("Invalid pixel format %p4cc\n",
&fb->format->format);
return -EINVAL;
}

return 0;
}


static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;

return drm_add_modes_noedid(connector, dev->mode_config.max_width,
dev->mode_config.max_height);
}

static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector,
struct drm_writeback_job *job)
{
struct amdgpu_framebuffer *afb;
struct drm_gem_object *obj;
struct amdgpu_device *adev;
struct amdgpu_bo *rbo;
uint32_t domain;
int r;

if (!job->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}

afb = to_amdgpu_framebuffer(job->fb);
obj = job->fb->obj[0];
rbo = gem_to_amdgpu_bo(obj);
adev = amdgpu_ttm_adev(rbo->tbo.bdev);

r = amdgpu_bo_reserve(rbo, true);
if (r) {
dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
return r;
}

r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
if (r) {
dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
goto error_unlock;
}

domain = amdgpu_display_supported_domains(adev, rbo->flags);

r = amdgpu_bo_pin(rbo, domain);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
goto error_unlock;
}

r = amdgpu_ttm_alloc_gart(&rbo->tbo);
if (unlikely(r != 0)) {
DRM_ERROR("%p bind failed\n", rbo);
goto error_unpin;
}

amdgpu_bo_unreserve(rbo);

afb->address = amdgpu_bo_gpu_offset(rbo);

amdgpu_bo_ref(rbo);

return 0;

error_unpin:
amdgpu_bo_unpin(rbo);

error_unlock:
amdgpu_bo_unreserve(rbo);
return r;
}

static void amdgpu_dm_wb_cleanup_job(struct drm_writeback_connector *connector,
struct drm_writeback_job *job)
{
struct amdgpu_bo *rbo;
int r;

if (!job->fb)
return;

rbo = gem_to_amdgpu_bo(job->fb->obj[0]);
r = amdgpu_bo_reserve(rbo, false);
if (unlikely(r)) {
DRM_ERROR("failed to reserve rbo before unpin\n");
return;
}

amdgpu_bo_unpin(rbo);
amdgpu_bo_unreserve(rbo);
amdgpu_bo_unref(&rbo);
}

static const struct drm_encoder_helper_funcs amdgpu_dm_wb_encoder_helper_funcs = {
.atomic_check = amdgpu_dm_wb_encoder_atomic_check,
};

static const struct drm_connector_funcs amdgpu_dm_wb_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = amdgpu_dm_connector_funcs_reset,
.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static const struct drm_connector_helper_funcs amdgpu_dm_wb_conn_helper_funcs = {
.get_modes = amdgpu_dm_wb_connector_get_modes,
.prepare_writeback_job = amdgpu_dm_wb_prepare_job,
.cleanup_writeback_job = amdgpu_dm_wb_cleanup_job,
};

int amdgpu_dm_wb_connector_init(struct amdgpu_display_manager *dm,
struct amdgpu_dm_wb_connector *wbcon,
uint32_t link_index)
{
struct dc *dc = dm->dc;
struct dc_link *link = dc_get_link_at_index(dc, link_index);
int res = 0;

wbcon->link = link;

drm_connector_helper_add(&wbcon->base.base, &amdgpu_dm_wb_conn_helper_funcs);

res = drm_writeback_connector_init(&dm->adev->ddev, &wbcon->base,
&amdgpu_dm_wb_connector_funcs,
&amdgpu_dm_wb_encoder_helper_funcs,
amdgpu_dm_wb_formats,
ARRAY_SIZE(amdgpu_dm_wb_formats),
amdgpu_dm_get_encoder_crtc_mask(dm->adev));

if (res)
return res;
/*
* Some of the properties below require access to state, like bpc.
* Allocate some default initial connector state with our reset helper.
*/
if (wbcon->base.base.funcs->reset)
wbcon->base.base.funcs->reset(&wbcon->base.base);

return 0;
}
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h (new file, 36 lines)
@ -0,0 +1,36 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/

#ifndef __AMDGPU_DM_WB_H__
#define __AMDGPU_DM_WB_H__

#include <drm/drm_writeback.h>

int amdgpu_dm_wb_connector_init(struct amdgpu_display_manager *dm,
struct amdgpu_dm_wb_connector *dm_wbcon,
uint32_t link_index);

#endif
@ -1691,7 +1691,7 @@ static enum bp_result bios_parser_enable_disp_power_gating(
static enum bp_result bios_parser_enable_lvtma_control(
struct dc_bios *dcb,
uint8_t uc_pwr_on,
uint8_t panel_instance,
uint8_t pwrseq_instance,
uint8_t bypass_panel_control_wait)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);

@ -1699,7 +1699,7 @@ static enum bp_result bios_parser_enable_lvtma_control(
if (!bp->cmd_tbl.enable_lvtma_control)
return BP_RESULT_FAILURE;

return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance, bypass_panel_control_wait);
return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, pwrseq_instance, bypass_panel_control_wait);
}

static bool bios_parser_is_accelerated_mode(

@ -1747,6 +1747,7 @@ static enum bp_result bios_parser_get_firmware_info(
result = get_firmware_info_v3_2(bp, info);
break;
case 4:
case 5:
result = get_firmware_info_v3_4(bp, info);
break;
default:

@ -2214,22 +2215,22 @@ static enum bp_result bios_parser_get_disp_connector_caps_info(

switch (bp->object_info_tbl.revision.minor) {
case 4:
default:
object = get_bios_object(bp, object_id);
default:
object = get_bios_object(bp, object_id);

if (!object)
return BP_RESULT_BADINPUT;
if (!object)
return BP_RESULT_BADINPUT;

record = get_disp_connector_caps_record(bp, object);
if (!record)
return BP_RESULT_NORECORD;
record = get_disp_connector_caps_record(bp, object);
if (!record)
return BP_RESULT_NORECORD;

info->INTERNAL_DISPLAY =
(record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY) ? 1 : 0;
info->INTERNAL_DISPLAY_BL =
(record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY_BL) ? 1 : 0;
break;
case 5:
info->INTERNAL_DISPLAY =
(record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY) ? 1 : 0;
info->INTERNAL_DISPLAY_BL =
(record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY_BL) ? 1 : 0;
break;
case 5:
object_path_v3 = get_bios_object_from_path_v3(bp, object_id);

if (!object_path_v3)

@ -3329,27 +3330,28 @@ static enum bp_result get_bracket_layout_record(
DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n");
return BP_RESULT_BADINPUT;
}

tbl = &bp->object_info_tbl;
v1_4 = tbl->v1_4;
v1_5 = tbl->v1_5;

result = BP_RESULT_NORECORD;
switch (bp->object_info_tbl.revision.minor) {
case 4:
default:
for (i = 0; i < v1_4->number_of_path; ++i) {
if (bracket_layout_id ==
v1_4->display_path[i].display_objid) {
result = update_slot_layout_info(dcb, i, slot_layout_info);
break;
}
case 4:
default:
for (i = 0; i < v1_4->number_of_path; ++i) {
if (bracket_layout_id == v1_4->display_path[i].display_objid) {
result = update_slot_layout_info(dcb, i, slot_layout_info);
break;
}
break;
case 5:
for (i = 0; i < v1_5->number_of_path; ++i)
result = update_slot_layout_info_v2(dcb, i, slot_layout_info);
break;
}
break;
case 5:
for (i = 0; i < v1_5->number_of_path; ++i)
result = update_slot_layout_info_v2(dcb, i, slot_layout_info);
break;
}

return result;
}

@ -3358,9 +3360,7 @@ static enum bp_result bios_get_board_layout_info(
struct board_layout_info *board_layout_info)
{
unsigned int i;

struct bios_parser *bp;

static enum bp_result record_result;
unsigned int max_slots;

@ -3370,7 +3370,6 @@ static enum bp_result bios_get_board_layout_info(
0, 0
};


bp = BP_FROM_DCB(dcb);

if (board_layout_info == NULL) {

@ -3551,7 +3550,6 @@ static const struct dc_vbios_funcs vbios_funcs = {
.bios_parser_destroy = firmware_parser_destroy,

.get_board_layout_info = bios_get_board_layout_info,
/* TODO: use this fn in hw init?*/
.pack_data_tables = bios_parser_pack_data_tables,

.get_atom_dc_golden_table = bios_get_atom_dc_golden_table,
@ -976,7 +976,7 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
static enum bp_result enable_lvtma_control(
struct bios_parser *bp,
uint8_t uc_pwr_on,
uint8_t panel_instance,
uint8_t pwrseq_instance,
uint8_t bypass_panel_control_wait);

static void init_enable_lvtma_control(struct bios_parser *bp)

@ -989,7 +989,7 @@ static void init_enable_lvtma_control(struct bios_parser *bp)
static void enable_lvtma_control_dmcub(
struct dc_dmub_srv *dmcub,
uint8_t uc_pwr_on,
uint8_t panel_instance,
uint8_t pwrseq_instance,
uint8_t bypass_panel_control_wait)
{

@ -1002,8 +1002,8 @@ static void enable_lvtma_control_dmcub(
DMUB_CMD__VBIOS_LVTMA_CONTROL;
cmd.lvtma_control.data.uc_pwr_action =
uc_pwr_on;
cmd.lvtma_control.data.panel_inst =
panel_instance;
cmd.lvtma_control.data.pwrseq_inst =
pwrseq_instance;
cmd.lvtma_control.data.bypass_panel_control_wait =
bypass_panel_control_wait;
dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

@ -1012,7 +1012,7 @@ static void enable_lvtma_control_dmcub(
static enum bp_result enable_lvtma_control(
struct bios_parser *bp,
uint8_t uc_pwr_on,
uint8_t panel_instance,
uint8_t pwrseq_instance,
uint8_t bypass_panel_control_wait)
{
enum bp_result result = BP_RESULT_FAILURE;

@ -1021,7 +1021,7 @@ static enum bp_result enable_lvtma_control(
bp->base.ctx->dc->debug.dmub_command_table) {
enable_lvtma_control_dmcub(bp->base.ctx->dmub_srv,
uc_pwr_on,
panel_instance,
pwrseq_instance,
bypass_panel_control_wait);
return BP_RESULT_OK;
}

@ -96,7 +96,7 @@ struct cmd_tbl {
struct bios_parser *bp, uint8_t id);
enum bp_result (*enable_lvtma_control)(struct bios_parser *bp,
uint8_t uc_pwr_on,
uint8_t panel_instance,
uint8_t pwrseq_instance,
uint8_t bypass_panel_control_wait);
};
@ -460,18 +460,24 @@ static int dcn32_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base)

static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr_internal *clk_mgr)
{
unsigned int dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK
unsigned int dppclk_khz_reg = REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK
unsigned int dprefclk_khz_reg = REG_READ(CLK1_CLK2_CURRENT_CNT); // DPREFCLK
unsigned int dcfclk_khz_reg = REG_READ(CLK1_CLK3_CURRENT_CNT); // DCFCLK
unsigned int dtbclk_khz_reg = REG_READ(CLK1_CLK4_CURRENT_CNT); // DTBCLK
unsigned int fclk_khz_reg = REG_READ(CLK4_CLK0_CURRENT_CNT); // FCLK
unsigned int dispclk_khz_reg, dppclk_khz_reg, dprefclk_khz_reg, dcfclk_khz_reg, dtbclk_khz_reg,
fclk_khz_reg;
int dramclk_khz_override, fclk_khz_override, num_fclk_levels;

msleep(5);

dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK
dppclk_khz_reg = REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK
dprefclk_khz_reg = REG_READ(CLK1_CLK2_CURRENT_CNT); // DPREFCLK
dcfclk_khz_reg = REG_READ(CLK1_CLK3_CURRENT_CNT); // DCFCLK
dtbclk_khz_reg = REG_READ(CLK1_CLK4_CURRENT_CNT); // DTBCLK
fclk_khz_reg = REG_READ(CLK4_CLK0_CURRENT_CNT); // FCLK

// Overrides for these clocks in case there is no p_state change support
int dramclk_khz_override = new_clocks->dramclk_khz;
int fclk_khz_override = new_clocks->fclk_khz;
dramclk_khz_override = new_clocks->dramclk_khz;
fclk_khz_override = new_clocks->fclk_khz;

int num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1;
num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1;

if (!new_clocks->p_state_change_support) {
dramclk_khz_override = clk_mgr->base.bw_params->max_memclk_mhz * 1000;

@ -707,7 +713,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
dmcu->funcs->set_psr_wait_loop(dmcu,
clk_mgr_base->clks.dispclk_khz / 1000 / 7);

if (dc->config.enable_auto_dpm_test_logs) {
if (dc->config.enable_auto_dpm_test_logs && safe_to_lower) {
dcn32_auto_dpm_test_log(new_clocks, clk_mgr);
}
}
@ -80,12 +80,12 @@

static int dcn35_get_active_display_cnt_wa(
struct dc *dc,
struct dc_state *context)
struct dc_state *context,
int *all_active_disps)
{
int i, display_count;
int i, display_count = 0;
bool tmds_present = false;

display_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];

@ -103,7 +103,8 @@ static int dcn35_get_active_display_cnt_wa(
link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}

if (all_active_disps != NULL)
*all_active_disps = display_count;
/* WA for hang on HDMI after display off back on*/
if (display_count == 0 && tmds_present)
display_count = 1;

@ -126,21 +127,13 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *
continue;
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
!pipe->stream->link_enc)) {
struct stream_encoder *stream_enc = pipe->stream_res.stream_enc;

if (disable) {
if (stream_enc && stream_enc->funcs->disable_fifo)
pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc);

if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);

reset_sync_context_for_pipe(dc, context, i);
} else {
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);

if (stream_enc && stream_enc->funcs->enable_fifo)
pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc);
}
}
}

@ -224,15 +217,16 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
int display_count;
int display_count = 0;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
int all_active_disps = 0;

if (dc->work_arounds.skip_clock_update)
return;

/* DTBCLK is fixed, so set a default if unspecified. */
display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
new_clocks->ref_dtbclk_khz = 600000;

@ -254,7 +248,6 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
}
/* check that we're not already in lower */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
display_count = dcn35_get_active_display_cnt_wa(dc, context);
/* if we can go lower, go lower */
if (display_count == 0)
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;

@ -826,7 +819,7 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base)
struct dc_state *context = dc->current_state;

if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
display_count = dcn35_get_active_display_cnt_wa(dc, context);
display_count = dcn35_get_active_display_cnt_wa(dc, context, NULL);
/* if we can go lower, go lower */
if (display_count == 0)
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
@ -279,7 +279,7 @@ void dcn35_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, u
clk_mgr,
VBIOSSMC_MSG_SetDisplayIdleOptimizations,
idle_info);
smu_print("VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info = %d\n", idle_info);
smu_print("%s: VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info = %x\n", __func__, idle_info);
}

void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)

@ -298,7 +298,7 @@ void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool e
clk_mgr,
VBIOSSMC_MSG_SetDisplayIdleOptimizations,
idle_info.data);
smu_print("dcn35_smu_enable_phy_refclk_pwrdwn = %d\n", enable ? 1 : 0);
smu_print("%s smu_enable_phy_refclk_pwrdwn = %d\n", __func__, enable ? 1 : 0);
}

void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)

@ -310,6 +310,7 @@ void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
clk_mgr,
VBIOSSMC_MSG_UpdatePmeRestore,
0);
smu_print("%s: SMC_MSG_UpdatePmeRestore\n", __func__);
}

void dcn35_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)

@ -350,7 +351,7 @@ void dcn35_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)

void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support)
{
unsigned int msg_id, param;
unsigned int msg_id, param, retv;

if (!clk_mgr->smu_present)
return;

@ -360,27 +361,32 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
case DCN_ZSTATE_SUPPORT_ALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10) | (1 << 9) | (1 << 8);
smu_print("%s: SMC_MSG_AllowZstatesEntr msg = ALLOW, param = %d\n", __func__, param);
break;

case DCN_ZSTATE_SUPPORT_DISALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = 0;
smu_print("%s: SMC_MSG_AllowZstatesEntr msg_id = DISALLOW, param = %d\n", __func__, param);
break;


case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10);
smu_print("%s: SMC_MSG_AllowZstatesEntr msg = ALLOW_Z10_ONLY, param = %d\n", __func__, param);
break;

case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10) | (1 << 8);
smu_print("%s: SMC_MSG_AllowZstatesEntr msg = ALLOW_Z8_Z10_ONLY, param = %d\n", __func__, param);
break;

case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 8);
smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = %d\n", __func__, param);
break;

default: //DCN_ZSTATE_SUPPORT_UNKNOWN

@ -390,11 +396,11 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
}


dcn35_smu_send_msg_with_param(
retv = dcn35_smu_send_msg_with_param(
clk_mgr,
msg_id,
param);
smu_print("dcn35_smu_set_zstate_support msg_id = %d, param = %d\n", msg_id, param);
smu_print("%s: msg_id = %d, param = 0x%x, return = %d\n", __func__, msg_id, param, retv);
}

int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr)

@ -408,7 +414,7 @@ int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr)
VBIOSSMC_MSG_GetDprefclkFreq,
0);

smu_print("dcn35_smu_get_DPREF clk = %d mhz\n", dprefclk);
smu_print("%s: SMU DPREF clk = %d mhz\n", __func__, dprefclk);
return dprefclk * 1000;
}

@ -423,7 +429,7 @@ int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr)
VBIOSSMC_MSG_GetDtbclkFreq,
0);

smu_print("dcn35_smu_get_dtbclk = %d mhz\n", dtbclk);
smu_print("%s: get_dtbclk = %dmhz\n", __func__, dtbclk);
return dtbclk * 1000;
}
/* Arg = 1: Turn DTB on; 0: Turn DTB CLK OFF. when it is on, it is 600MHZ */

@ -436,7 +442,7 @@ void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
clk_mgr,
VBIOSSMC_MSG_SetDtbClk,
enable);
smu_print("dcn35_smu_set_dtbclk = %d \n", enable ? 1 : 0);
smu_print("%s: smu_set_dtbclk = %d\n", __func__, enable ? 1 : 0);
}

void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)

@ -445,30 +451,45 @@ void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *cl
clk_mgr,
VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
enable);
smu_print("%s: smu_enable_48mhz_tmdp_refclk_pwrdwn = %d\n", __func__, enable ? 1 : 0);
}

int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
{
return dcn35_smu_send_msg_with_param(
int retv;

retv = dcn35_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_DispPsrExit,
0);
smu_print("%s: smu_exit_low_power_state return = %d\n", __func__, retv);
return retv;
}

int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
{
return dcn35_smu_send_msg_with_param(
int retv;

retv = dcn35_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_QueryIPS2Support,
0);

//smu_print("%s: VBIOSSMC_MSG_QueryIPS2Support return = %x\n", __func__, retv);
return retv;
}

void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param)
{
REG_WRITE(MP1_SMN_C2PMSG_71, param);
//smu_print("%s: write_ips_scratch = %x\n", __func__, param);
}

uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr)
{
return REG_READ(MP1_SMN_C2PMSG_71);
uint32_t retv;

retv = REG_READ(MP1_SMN_C2PMSG_71);
//smu_print("%s: dcn35_smu_read_ips_scratch = %x\n", __func__, retv);
return retv;
}
@ -1521,7 +1521,7 @@ static void program_timing_sync(

}

/* remove any other pipes that are already been synced */
/* remove any other unblanked pipes as they have already been synced */
if (dc->config.use_pipe_ctx_sync_logic) {
/* check pipe's syncd to decide which pipe to be removed */
for (j = 1; j < group_size; j++) {

@ -1534,6 +1534,7 @@ static void program_timing_sync(
pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
}
} else {
/* remove any other pipes by checking valid plane */
for (j = j + 1; j < group_size; j++) {
bool is_blanked;

@ -2258,23 +2259,16 @@ struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
int i, j;
struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
#ifdef CONFIG_DRM_AMD_DC_FP
struct dml2_context *dml2 = NULL;
#endif

if (!new_ctx)
return NULL;
memcpy(new_ctx, src_ctx, sizeof(struct dc_state));

#ifdef CONFIG_DRM_AMD_DC_FP
if (new_ctx->bw_ctx.dml2) {
dml2 = kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
if (!dml2)
return NULL;

memcpy(dml2, src_ctx->bw_ctx.dml2, sizeof(struct dml2_context));
new_ctx->bw_ctx.dml2 = dml2;
}
if (new_ctx->bw_ctx.dml2 && !dml2_create_copy(&new_ctx->bw_ctx.dml2, src_ctx->bw_ctx.dml2)) {
dc_release_state(new_ctx);
return NULL;
}
#endif

for (i = 0; i < MAX_PIPES; i++) {
@ -4773,7 +4773,7 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
option = DITHER_OPTION_SPATIAL8;
break;
case COLOR_DEPTH_101010:
option = DITHER_OPTION_SPATIAL10;
option = DITHER_OPTION_TRUN10;
break;
default:
option = DITHER_OPTION_DISABLE;

@ -4799,6 +4799,8 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
if (option == DITHER_OPTION_TRUN10)
fmt_bit_depth->flags.TRUNCATE_MODE = 1;
}

/* special case - Formatter can only reduce by 4 bits at most.
@ -467,6 +467,52 @@ bool dc_stream_add_writeback(struct dc *dc,
struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
dwb->otg_inst = stream_status->primary_otg_inst;
}

if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
dm_error("DC: update_bandwidth failed!\n");
return false;
}

/* enable writeback */
if (dc->hwss.enable_writeback) {
struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];

if (dwb->funcs->is_enabled(dwb)) {
/* writeback pipe already enabled, only need to update */
dc->hwss.update_writeback(dc, wb_info, dc->current_state);
} else {
/* Enable writeback pipe from scratch*/
dc->hwss.enable_writeback(dc, wb_info, dc->current_state);
}
}

return true;
}

bool dc_stream_fc_disable_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst)
{
struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst];

if (stream == NULL) {
dm_error("DC: dc_stream is NULL!\n");
return false;
}

if (dwb_pipe_inst >= MAX_DWB_PIPES) {
dm_error("DC: writeback pipe is invalid!\n");
return false;
}

if (stream->num_wb_info > MAX_DWB_PIPES) {
dm_error("DC: num_wb_info is invalid!\n");
return false;
}

if (dwb->funcs->set_fc_enable)
dwb->funcs->set_fc_enable(dwb, DWB_FRAME_CAPTURE_DISABLE);

return true;
}

@ -490,27 +536,37 @@ bool dc_stream_remove_writeback(struct dc *dc,
return false;
}

// stream->writeback_info[dwb_pipe_inst].wb_enabled = false;
for (i = 0; i < stream->num_wb_info; i++) {
/*dynamic update*/
if (stream->writeback_info[i].wb_enabled &&
stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst) {
stream->writeback_info[i].wb_enabled = false;
}
}

/* remove writeback info for disabled writeback pipes from stream */
for (i = 0, j = 0; i < stream->num_wb_info; i++) {
if (stream->writeback_info[i].wb_enabled) {
if (j < i)
/* trim the array */

if (stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst)
stream->writeback_info[i].wb_enabled = false;

/* trim the array */
if (j < i) {
memcpy(&stream->writeback_info[j], &stream->writeback_info[i],
sizeof(struct dc_writeback_info));
j++;
j++;
}
}
}
stream->num_wb_info = j;

/* recalculate and apply DML parameters */
if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
dm_error("DC: update_bandwidth failed!\n");
return false;
}

/* disable writeback */
if (dc->hwss.disable_writeback) {
struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst];

if (dwb->funcs->is_enabled(dwb))
dc->hwss.disable_writeback(dc, dwb_pipe_inst);
}

return true;
}
@ -49,7 +49,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;

#define DC_VER "3.2.262"
#define DC_VER "3.2.263"

#define MAX_SURFACES 3
#define MAX_PLANES 6

@ -1541,7 +1541,13 @@ struct dc_link {
bool is_dig_mapping_flexible;
bool hpd_status; /* HPD status of link without physical HPD pin. */
bool is_hpd_pending; /* Indicates a new received hpd */
bool is_automated; /* Indicates automated testing */

/* USB4 DPIA links skip verifying link cap, instead performing the fallback method
* for every link training. This is incompatible with DP LL compliance automation,
* which expects the same link settings to be used every retry on a link loss.
* This flag is used to skip the fallback when link loss occurs during automation.
*/
bool skip_fallback_on_link_loss;

bool edp_sink_present;

@ -140,7 +140,7 @@ struct dc_vbios_funcs {
enum bp_result (*enable_lvtma_control)(
struct dc_bios *bios,
uint8_t uc_pwr_on,
uint8_t panel_instance,
uint8_t pwrseq_instance,
uint8_t bypass_panel_control_wait);

enum bp_result (*get_soc_bb_info)(

@ -465,6 +465,7 @@ struct dc_cursor_mi_param {
struct fixed31_32 v_scale_ratio;
enum dc_rotation_angle rotation;
bool mirror;
struct dc_stream_state *stream;
};

/* IPP related types */
@ -454,6 +454,10 @@ bool dc_stream_add_writeback(struct dc *dc,
|
||||
struct dc_stream_state *stream,
|
||||
struct dc_writeback_info *wb_info);
|
||||
|
||||
bool dc_stream_fc_disable_writeback(struct dc *dc,
|
||||
struct dc_stream_state *stream,
|
||||
uint32_t dwb_pipe_inst);
|
||||
|
||||
bool dc_stream_remove_writeback(struct dc *dc,
|
||||
struct dc_stream_state *stream,
|
||||
uint32_t dwb_pipe_inst);
|
||||
|
@ -145,7 +145,11 @@ static bool dmub_abm_save_restore_ex(
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
|
||||
static bool dmub_abm_set_pipe_ex(struct abm *abm,
|
||||
uint32_t otg_inst,
|
||||
uint32_t option,
|
||||
uint32_t panel_inst,
|
||||
uint32_t pwrseq_inst)
|
||||
{
|
||||
bool ret = false;
|
||||
unsigned int feature_support;
|
||||
@ -153,7 +157,7 @@ static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t op
|
||||
feature_support = abm_feature_support(abm, panel_inst);
|
||||
|
||||
if (feature_support == ABM_LCD_SUPPORT)
|
||||
ret = dmub_abm_set_pipe(abm, otg_inst, option, panel_inst);
|
||||
ret = dmub_abm_set_pipe(abm, otg_inst, option, panel_inst, pwrseq_inst);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -254,7 +254,11 @@ bool dmub_abm_save_restore(
|
||||
return true;
|
||||
}
|
||||
|
||||
bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
|
||||
bool dmub_abm_set_pipe(struct abm *abm,
|
||||
uint32_t otg_inst,
|
||||
uint32_t option,
|
||||
uint32_t panel_inst,
|
||||
uint32_t pwrseq_inst)
|
||||
{
|
||||
union dmub_rb_cmd cmd;
|
||||
struct dc_context *dc = abm->ctx;
|
||||
@ -264,6 +268,7 @@ bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint
|
||||
cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
|
||||
cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
|
||||
cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
|
||||
cmd.abm_set_pipe.abm_set_pipe_data.pwrseq_inst = pwrseq_inst;
|
||||
cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option;
|
||||
cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
|
||||
cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
|
||||
|
@ -44,7 +44,7 @@ bool dmub_abm_save_restore(
|
||||
struct dc_context *dc,
|
||||
unsigned int panel_inst,
|
||||
struct abm_save_restore *pData);
|
||||
bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst);
|
||||
bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst, uint32_t pwrseq_inst);
|
||||
bool dmub_abm_set_backlight_level(struct abm *abm,
|
||||
unsigned int backlight_pwm_u16_16,
|
||||
unsigned int frame_ramp,
|
||||
|
@ -296,6 +296,38 @@
type DTBCLK_P1_GATE_DISABLE;\
type DTBCLK_P2_GATE_DISABLE;\
type DTBCLK_P3_GATE_DISABLE;\
type DSCCLK0_ROOT_GATE_DISABLE;\
type DSCCLK1_ROOT_GATE_DISABLE;\
type DSCCLK2_ROOT_GATE_DISABLE;\
type DSCCLK3_ROOT_GATE_DISABLE;\
type SYMCLKA_FE_ROOT_GATE_DISABLE;\
type SYMCLKB_FE_ROOT_GATE_DISABLE;\
type SYMCLKC_FE_ROOT_GATE_DISABLE;\
type SYMCLKD_FE_ROOT_GATE_DISABLE;\
type SYMCLKE_FE_ROOT_GATE_DISABLE;\
type DPPCLK0_ROOT_GATE_DISABLE;\
type DPPCLK1_ROOT_GATE_DISABLE;\
type DPPCLK2_ROOT_GATE_DISABLE;\
type DPPCLK3_ROOT_GATE_DISABLE;\
type HDMISTREAMCLK0_ROOT_GATE_DISABLE;\
type SYMCLKA_ROOT_GATE_DISABLE;\
type SYMCLKB_ROOT_GATE_DISABLE;\
type SYMCLKC_ROOT_GATE_DISABLE;\
type SYMCLKD_ROOT_GATE_DISABLE;\
type SYMCLKE_ROOT_GATE_DISABLE;\
type PHYA_REFCLK_ROOT_GATE_DISABLE;\
type PHYB_REFCLK_ROOT_GATE_DISABLE;\
type PHYC_REFCLK_ROOT_GATE_DISABLE;\
type PHYD_REFCLK_ROOT_GATE_DISABLE;\
type PHYE_REFCLK_ROOT_GATE_DISABLE;\
type DPSTREAMCLK0_ROOT_GATE_DISABLE;\
type DPSTREAMCLK1_ROOT_GATE_DISABLE;\
type DPSTREAMCLK2_ROOT_GATE_DISABLE;\
type DPSTREAMCLK3_ROOT_GATE_DISABLE;\
type DPSTREAMCLK0_GATE_DISABLE;\
type DPSTREAMCLK1_GATE_DISABLE;\
type DPSTREAMCLK2_GATE_DISABLE;\
type DPSTREAMCLK3_GATE_DISABLE;\

struct dccg_shift {
DCCG_REG_FIELD_LIST(uint8_t)
@ -1077,8 +1077,16 @@ void hubp2_cursor_set_position(
if (src_y_offset < 0)
src_y_offset = 0;
/* Save necessary cursor info x, y position. w, h is saved in attribute func. */
hubp->cur_rect.x = src_x_offset + param->viewport.x;
hubp->cur_rect.y = src_y_offset + param->viewport.y;
if (param->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
param->rotation != ROTATION_ANGLE_0) {
hubp->cur_rect.x = 0;
hubp->cur_rect.y = 0;
hubp->cur_rect.w = param->stream->timing.h_addressable;
hubp->cur_rect.h = param->stream->timing.v_addressable;
} else {
hubp->cur_rect.x = src_x_offset + param->viewport.x;
hubp->cur_rect.y = src_y_offset + param->viewport.y;
}
}

void hubp2_clk_cntl(struct hubp *hubp, bool enable)
@ -130,6 +130,28 @@ bool dwb3_disable(struct dwbc *dwbc)
return true;
}

void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable)
{
struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
unsigned int pre_locked;

REG_GET(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, &pre_locked);

/* Lock DWB registers */
if (pre_locked == 0)
REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 1);

/* Disable FC */
REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, enable);

/* Unlock DWB registers */
if (pre_locked == 0)
REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 0);

DC_LOG_DWB("%s dwb3_fc_disabled at inst = %d", __func__, dwbc->inst);
}
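dwb3_set_fc_enable only takes DWB_UPDATE_LOCK when the registers were not already locked, and only releases it in that same case, so the helper is safe to call both standalone and from inside an already-locked update sequence. The shape of that conditional-lock pattern, sketched with hypothetical helpers rather than the DC register macros:

/*
 * Sketch: lock only if we were not already inside a locked section,
 * and unlock only if we took the lock ourselves. The reg_* helpers
 * below are assumed stand-ins, not a real API.
 */
#include <stdbool.h>

struct device_regs;
bool reg_read_lock(struct device_regs *regs);
void reg_set_lock(struct device_regs *regs, bool lock);
void reg_write_field(struct device_regs *regs, unsigned int val);

static void write_field_safely(struct device_regs *regs, unsigned int val)
{
	bool was_locked = reg_read_lock(regs);

	if (!was_locked)
		reg_set_lock(regs, true);

	reg_write_field(regs, val); /* the actual programming */

	if (!was_locked)
		reg_set_lock(regs, false);
}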

bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params)
{
struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
@ -226,6 +248,7 @@ static const struct dwbc_funcs dcn30_dwbc_funcs = {
.disable = dwb3_disable,
.update = dwb3_update,
.is_enabled = dwb3_is_enabled,
.set_fc_enable = dwb3_set_fc_enable,
.set_stereo = dwb3_set_stereo,
.set_new_content = dwb3_set_new_content,
.dwb_program_output_csc = NULL,
@ -877,6 +877,8 @@ bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params);

bool dwb3_is_enabled(struct dwbc *dwbc);

void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable);

void dwb3_set_stereo(struct dwbc *dwbc,
struct dwb_stereo_params *stereo_params);
@ -243,6 +243,9 @@ static bool dwb3_program_ogam_lut(
return false;
}

if (params->hw_points_num == 0)
return false;

REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);

current_mode = dwb3_get_ogam_current(dwbc30);
@ -50,7 +50,7 @@ static bool dcn31_query_backlight_info(struct panel_cntl *panel_cntl, union dmub
cmd->panel_cntl.header.type = DMUB_CMD__PANEL_CNTL;
cmd->panel_cntl.header.sub_type = DMUB_CMD__PANEL_CNTL_QUERY_BACKLIGHT_INFO;
cmd->panel_cntl.header.payload_bytes = sizeof(cmd->panel_cntl.data);
cmd->panel_cntl.data.inst = dcn31_panel_cntl->base.inst;
cmd->panel_cntl.data.pwrseq_inst = dcn31_panel_cntl->base.pwrseq_inst;

return dm_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
}
@ -78,7 +78,7 @@ static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
cmd.panel_cntl.header.type = DMUB_CMD__PANEL_CNTL;
cmd.panel_cntl.header.sub_type = DMUB_CMD__PANEL_CNTL_HW_INIT;
cmd.panel_cntl.header.payload_bytes = sizeof(cmd.panel_cntl.data);
cmd.panel_cntl.data.inst = dcn31_panel_cntl->base.inst;
cmd.panel_cntl.data.pwrseq_inst = dcn31_panel_cntl->base.pwrseq_inst;
cmd.panel_cntl.data.bl_pwm_cntl = panel_cntl->stored_backlight_registers.BL_PWM_CNTL;
cmd.panel_cntl.data.bl_pwm_period_cntl = panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL;
cmd.panel_cntl.data.bl_pwm_ref_div1 =
@ -157,4 +157,5 @@ void dcn31_panel_cntl_construct(
dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs;
dcn31_panel_cntl->base.ctx = init_data->ctx;
dcn31_panel_cntl->base.inst = init_data->inst;
dcn31_panel_cntl->base.pwrseq_inst = init_data->pwrseq_inst;
}
@ -71,12 +71,13 @@ void mpc32_power_on_blnd_lut(
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);

REG_SET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], 0, MPCC_MCM_1DLUT_MEM_PWR_DIS, power_on);

if (mpc->ctx->dc->debug.enable_mem_low_power.bits.cm) {
if (power_on) {
REG_UPDATE(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_FORCE, 0);
REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_STATE, 0, 1, 5);
} else if (!mpc->ctx->dc->debug.disable_mem_low_power) {
ASSERT(false);
/* TODO: change to mpc
* dpp_base->ctx->dc->optimized_required = true;
* dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
@ -806,3 +806,29 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int

return result;
}

void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe = NULL;

for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
int odm_slice_count = 0;

if (!res_ctx->pipe_ctx[i].stream)
continue;
pipe = &res_ctx->pipe_ctx[i];
odm_slice_count = resource_get_odm_slice_count(pipe);

if (odm_slice_count == 1)
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
else if (odm_slice_count == 2)
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
else if (odm_slice_count == 4)
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1;

pipe_cnt++;
}
}
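This helper is wired into validation in the dcn32_internal_validate_bw hunk further down: it runs right after populate_dml_pipes so DML sees an ODM combine policy that matches each active stream's actual slice count. The mapping it applies, restated:

/* odm_slice_count -> DML ODM combine policy (from the function above) */
/*   1 -> dm_odm_combine_policy_dal                                     */
/*   2 -> dm_odm_combine_policy_2to1                                    */
/*   4 -> dm_odm_combine_policy_4to1                                    */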
@ -506,6 +506,64 @@ static void dccg35_dpp_root_clock_control(
dccg->dpp_clock_gated[dpp_inst] = !clock_on;
}

static void dccg35_disable_symclk32_se(
struct dccg *dccg,
int hpo_se_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);

/* set refclk as the source for symclk32_se */
switch (hpo_se_inst) {
case 0:
REG_UPDATE_2(SYMCLK32_SE_CNTL,
SYMCLK32_SE0_SRC_SEL, 0,
SYMCLK32_SE0_EN, 0);
if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) {
REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
SYMCLK32_SE0_GATE_DISABLE, 0);
// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
// SYMCLK32_ROOT_SE0_GATE_DISABLE, 0);
}
break;
case 1:
REG_UPDATE_2(SYMCLK32_SE_CNTL,
SYMCLK32_SE1_SRC_SEL, 0,
SYMCLK32_SE1_EN, 0);
if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) {
REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
SYMCLK32_SE1_GATE_DISABLE, 0);
// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
// SYMCLK32_ROOT_SE1_GATE_DISABLE, 0);
}
break;
case 2:
REG_UPDATE_2(SYMCLK32_SE_CNTL,
SYMCLK32_SE2_SRC_SEL, 0,
SYMCLK32_SE2_EN, 0);
if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) {
REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
SYMCLK32_SE2_GATE_DISABLE, 0);
// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
// SYMCLK32_ROOT_SE2_GATE_DISABLE, 0);
}
break;
case 3:
REG_UPDATE_2(SYMCLK32_SE_CNTL,
SYMCLK32_SE3_SRC_SEL, 0,
SYMCLK32_SE3_EN, 0);
if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) {
REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
SYMCLK32_SE3_GATE_DISABLE, 0);
// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
// SYMCLK32_ROOT_SE3_GATE_DISABLE, 0);
}
break;
default:
BREAK_TO_DEBUGGER();
return;
}
}

void dccg35_init(struct dccg *dccg)
{
int otg_inst;
@ -514,7 +572,7 @@ void dccg35_init(struct dccg *dccg)
* will cause DCN to hang.
*/
for (otg_inst = 0; otg_inst < 4; otg_inst++)
dccg31_disable_symclk32_se(dccg, otg_inst);
dccg35_disable_symclk32_se(dccg, otg_inst);

if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
for (otg_inst = 0; otg_inst < 2; otg_inst++)
@ -788,7 +846,7 @@ static const struct dccg_funcs dccg35_funcs = {
.dccg_init = dccg35_init,
.set_dpstreamclk = dccg35_set_dpstreamclk,
.enable_symclk32_se = dccg31_enable_symclk32_se,
.disable_symclk32_se = dccg31_disable_symclk32_se,
.disable_symclk32_se = dccg35_disable_symclk32_se,
.enable_symclk32_le = dccg31_enable_symclk32_le,
.disable_symclk32_le = dccg31_disable_symclk32_le,
.set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating,
@ -34,6 +34,7 @@
#define DCCG_REG_LIST_DCN35() \
DCCG_REG_LIST_DCN314(),\
SR(DPPCLK_CTRL),\
SR(DCCG_GATE_DISABLE_CNTL4),\
SR(DCCG_GATE_DISABLE_CNTL5),\
SR(DCCG_GATE_DISABLE_CNTL6),\
SR(DCCG_GLOBAL_FGCG_REP_CNTL),\
@ -180,6 +181,56 @@
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL4, HDMICHARCLK0_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL6, HDMISTREAMCLK0_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE0_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE1_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE2_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE3_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE0_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE1_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYA_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYB_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYC_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYD_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYE_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\
DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL, DISPCLK_DCCG_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, HDMISTREAMCLK0_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\

struct dccg *dccg35_create(
struct dc_context *ctx,
@ -3542,7 +3542,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
struct vba_vars_st *v = &mode_lib->vba;
int MinPrefetchMode, MaxPrefetchMode;
int i;
int i, start_state;
unsigned int j, k, m;
bool EnoughWritebackUnits = true;
bool WritebackModeSupport = true;
@ -3553,6 +3553,11 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l

/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/

if (mode_lib->validate_max_state)
start_state = v->soc.num_states - 1;
else
start_state = 0;
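start_state clamps every per-state sweep in this function, so when validate_max_state is set (the fast-validate path wired up in the dcn30 resource hunk below) only the highest voltage state is checked instead of all of them. In effect each loop over SOC states becomes the following; this is a restatement of the change, not new driver logic:

/* Worst-case-only validation: start the sweep at the top state. */
start_state = mode_lib->validate_max_state ? v->soc.num_states - 1 : 0;
for (i = start_state; i < v->soc.num_states; i++) {
	/* per-state mode-support checks */
}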

CalculateMinAndMaxPrefetchMode(
mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank,
&MinPrefetchMode, &MaxPrefetchMode);
@ -3851,7 +3856,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->SingleDPPViewportSizeSupportPerPlane,
&v->ViewportSizeSupport[0][0]);

for (i = 0; i < v->soc.num_states; i++) {
for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
v->MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDispclk[i], v->DISPCLKDPPCLKVCOSpeed);
v->MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDppclk[i], v->DISPCLKDPPCLKVCOSpeed);
@ -4007,7 +4012,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l

/*Total Available Pipes Support Check*/

for (i = 0; i < v->soc.num_states; i++) {
for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
if (v->TotalNumberOfActiveDPP[i][j] <= v->MaxNumDPP) {
v->TotalAvailablePipesSupport[i][j] = true;
@ -4046,7 +4051,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}

for (i = 0; i < v->soc.num_states; i++) {
for (i = start_state; i < v->soc.num_states; i++) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->RequiresDSC[i][k] = false;
v->RequiresFEC[i][k] = false;
@ -4174,7 +4179,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
}
for (i = 0; i < v->soc.num_states; i++) {
for (i = start_state; i < v->soc.num_states; i++) {
v->DIOSupport[i] = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (!v->skip_dio_check[k] && v->BlendingAndTiming[k] == k && (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi)
@ -4185,7 +4190,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}

for (i = 0; i < v->soc.num_states; ++i) {
for (i = start_state; i < v->soc.num_states; ++i) {
v->ODMCombine4To1SupportCheckOK[i] = true;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k && v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1
@ -4197,7 +4202,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l

/* Skip dscclk validation: as long as dispclk is supported, dscclk is also implicitly supported */

for (i = 0; i < v->soc.num_states; i++) {
for (i = start_state; i < v->soc.num_states; i++) {
v->NotEnoughDSCUnits[i] = false;
v->TotalDSCUnitsRequired = 0.0;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
@ -4217,7 +4222,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
/*DSC Delay per state*/

for (i = 0; i < v->soc.num_states; i++) {
for (i = start_state; i < v->soc.num_states; i++) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->OutputBppPerState[i][k] == BPP_INVALID) {
v->BPP = 0.0;
@ -4333,7 +4338,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->cursor_bw[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0 / (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k];
}

for (i = 0; i < v->soc.num_states; i++) {
for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->swath_width_luma_ub_this_state[k] = v->swath_width_luma_ub_all_states[i][j][k];
@ -5075,7 +5080,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l

/*PTE Buffer Size Check*/

for (i = 0; i < v->soc.num_states; i++) {
for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
v->PTEBufferSizeNotExceeded[i][j] = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
@ -5136,7 +5141,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
/*Mode Support, Voltage State and SOC Configuration*/

for (i = v->soc.num_states - 1; i >= 0; i--) {
for (i = v->soc.num_states - 1; i >= start_state; i--) {
for (j = 0; j < 2; j++) {
if (v->ScaleRatioAndTapsSupport == 1 && v->SourceFormatPixelAndScanSupport == 1 && v->ViewportSizeSupport[i][j] == 1
&& v->DIOSupport[i] == 1 && v->ODMCombine4To1SupportCheckOK[i] == 1
@ -5158,7 +5163,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
{
unsigned int MaximumMPCCombine = 0;
for (i = v->soc.num_states; i >= 0; i--) {
for (i = v->soc.num_states; i >= start_state; i--) {
if (i == v->soc.num_states || v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true) {
v->VoltageLevel = i;
v->ModeIsSupported = v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true;
@ -2192,6 +2192,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
int i;

pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);

/* repopulate_pipes = 1 means the pipes were either split or merged. In this case
* we have to re-calculate the DET allocation and run through DML once more to
@ -2200,7 +2201,9 @@ bool dcn32_internal_validate_bw(struct dc *dc,
* */
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_uclk_fclk_and_stutter_if_possible;

vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);

if (vlevel == context->bw_ctx.dml.soc.num_states) {
/* failed after DET size changes */
goto validate_fail;
@ -326,6 +326,25 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
dcn3_5_soc.dram_clock_change_latency_us =
dc->debug.dram_clock_change_latency_ns / 1000.0;
}

if (dc->bb_overrides.dram_clock_change_latency_ns > 0)
dcn3_5_soc.dram_clock_change_latency_us =
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;

if (dc->bb_overrides.sr_exit_time_ns > 0)
dcn3_5_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;

if (dc->bb_overrides.sr_enter_plus_exit_time_ns > 0)
dcn3_5_soc.sr_enter_plus_exit_time_us =
dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;

if (dc->bb_overrides.sr_exit_z8_time_ns > 0)
dcn3_5_soc.sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0;

if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns > 0)
dcn3_5_soc.sr_enter_plus_exit_z8_time_us =
dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0;

/*temp till dml2 fully work without dml1*/
dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip,
DML_PROJECT_DCN31);
@ -341,6 +341,9 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
break;
}

if (dml2->config.bbox_overrides.clks_table.num_states)
p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;

/* Override from passed values, if available */
for (i = 0; i < p->in_states->num_states; i++) {
if (dml2->config.bbox_overrides.sr_exit_latency_us) {
@ -397,7 +400,6 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
/* Copy clocks tables entries, if available */
if (dml2->config.bbox_overrides.clks_table.num_states) {
p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;

for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels; i++) {
p->in_states->state_array[i].dcfclk_mhz = dml2->config.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz;
@ -157,6 +157,17 @@ bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx)
{
/* If this assert is hit then we have a link encoder dynamic management issue */
ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);

if (pipe_ctx->stream == NULL)
return false;
/* Count MST hubs once by treating only 1st remote sink in topology as an encoder */
if (pipe_ctx->stream->link && pipe_ctx->stream->link->remote_sinks[0]) {
return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
pipe_ctx->link_res.hpo_dp_link_enc &&
dc_is_dp_signal(pipe_ctx->stream->signal) &&
(pipe_ctx->stream->link->remote_sinks[0] == pipe_ctx->stream->sink));
}

return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
pipe_ctx->link_res.hpo_dp_link_enc &&
dc_is_dp_signal(pipe_ctx->stream->signal));
@ -691,10 +691,15 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, bool fast_v
return out;
}

static inline struct dml2_context *dml2_allocate_memory(void)
{
return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
}

bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
// Allocate Mode Lib Ctx
*dml2 = (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
*dml2 = dml2_allocate_memory();

if (!(*dml2))
return false;
@ -745,3 +750,25 @@ void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2,
*fclk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.FCLKChangeSupport[0];
*dram_clk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.DRAMClockChangeSupport[0];
}

void dml2_copy(struct dml2_context *dst_dml2,
struct dml2_context *src_dml2)
{
/* copy Mode Lib Ctx */
memcpy(dst_dml2, src_dml2, sizeof(struct dml2_context));
}

bool dml2_create_copy(struct dml2_context **dst_dml2,
struct dml2_context *src_dml2)
{
/* Allocate Mode Lib Ctx */
*dst_dml2 = dml2_allocate_memory();

if (!(*dst_dml2))
return false;

/* copy Mode Lib Ctx */
dml2_copy(*dst_dml2, src_dml2);

return true;
}
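dml2_create_copy composes the two helpers above: allocate a fresh context with dml2_allocate_memory, then dml2_copy (a whole-struct memcpy) into it. A plausible call pattern, hedged because the call sites are outside this hunk and the source-context name below is made up:

/* Sketch: duplicate a DML2 context for a trial computation, then
 * free the duplicate. `src_ctx` is a hypothetical existing context;
 * dml2_destroy() is declared in the header change that follows.
 */
struct dml2_context *trial = NULL;

if (dml2_create_copy(&trial, src_ctx)) {
	/* ... use `trial` without disturbing src_ctx ... */
	dml2_destroy(trial);
}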
@ -191,6 +191,10 @@ bool dml2_create(const struct dc *in_dc,
struct dml2_context **dml2);

void dml2_destroy(struct dml2_context *dml2);
void dml2_copy(struct dml2_context *dst_dml2,
struct dml2_context *src_dml2);
bool dml2_create_copy(struct dml2_context **dst_dml2,
struct dml2_context *src_dml2);

/*
* dml2_validate - Determines if a display configuration is supported or not.
@ -790,7 +790,7 @@ void dce110_edp_power_control(
struct dc_context *ctx = link->ctx;
struct bp_transmitter_control cntl = { 0 };
enum bp_result bp_result;
uint8_t panel_instance;
uint8_t pwrseq_instance;

if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
@ -873,7 +873,7 @@ void dce110_edp_power_control(
cntl.coherent = false;
cntl.lanes_number = LANE_COUNT_FOUR;
cntl.hpd_sel = link->link_enc->hpd_source;
panel_instance = link->panel_cntl->inst;
pwrseq_instance = link->panel_cntl->pwrseq_inst;

if (ctx->dc->ctx->dmub_srv &&
ctx->dc->debug.dmub_command_table) {
@ -881,11 +881,11 @@ void dce110_edp_power_control(
if (cntl.action == TRANSMITTER_CONTROL_POWER_ON) {
bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_POWER_ON,
panel_instance, link->link_powered_externally);
pwrseq_instance, link->link_powered_externally);
} else {
bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_POWER_OFF,
panel_instance, link->link_powered_externally);
pwrseq_instance, link->link_powered_externally);
}
}
@ -956,7 +956,7 @@ void dce110_edp_backlight_control(
{
struct dc_context *ctx = link->ctx;
struct bp_transmitter_control cntl = { 0 };
uint8_t panel_instance;
uint8_t pwrseq_instance;
unsigned int pre_T11_delay = OLED_PRE_T11_DELAY;
unsigned int post_T7_delay = OLED_POST_T7_DELAY;

@ -1009,7 +1009,7 @@ void dce110_edp_backlight_control(
*/
/* dc_service_sleep_in_milliseconds(50); */
/*edp 1.2*/
panel_instance = link->panel_cntl->inst;
pwrseq_instance = link->panel_cntl->pwrseq_inst;

if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) {
if (!link->dc->config.edp_no_power_sequencing)
@ -1034,11 +1034,11 @@ void dce110_edp_backlight_control(
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_LCD_BLON,
panel_instance, link->link_powered_externally);
pwrseq_instance, link->link_powered_externally);
else
ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_LCD_BLOFF,
panel_instance, link->link_powered_externally);
pwrseq_instance, link->link_powered_externally);
}

link_transmitter_control(ctx->dc_bios, &cntl);
@ -3417,7 +3417,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
.rotation = pipe_ctx->plane_state->rotation,
.mirror = pipe_ctx->plane_state->horizontal_mirror
.mirror = pipe_ctx->plane_state->horizontal_mirror,
.stream = pipe_ctx->stream,
};
bool pipe_split_on = false;
bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
@ -137,7 +137,8 @@ void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
pipe_ctx->stream->dpms_off = true;
}

static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst,
uint32_t option, uint32_t panel_inst, uint32_t pwrseq_inst)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = abm->ctx;
@ -147,6 +148,7 @@ static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t optio
cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
cmd.abm_set_pipe.abm_set_pipe_data.pwrseq_inst = pwrseq_inst;
cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option;
cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
@ -179,7 +181,6 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
struct abm *abm = pipe_ctx->stream_res.abm;
uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;

struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;

if (dmcu) {
@ -190,9 +191,13 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
if (abm && panel_cntl) {
if (abm->funcs && abm->funcs->set_pipe_ex) {
abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE,
panel_cntl->inst);
panel_cntl->inst, panel_cntl->pwrseq_inst);
} else {
dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, panel_cntl->inst);
dmub_abm_set_pipe(abm,
otg_inst,
SET_ABM_PIPE_IMMEDIATELY_DISABLE,
panel_cntl->inst,
panel_cntl->pwrseq_inst);
}
panel_cntl->funcs->store_backlight_level(panel_cntl);
}
@ -212,9 +217,16 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)

if (abm && panel_cntl) {
if (abm->funcs && abm->funcs->set_pipe_ex) {
abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
abm->funcs->set_pipe_ex(abm,
otg_inst,
SET_ABM_PIPE_NORMAL,
panel_cntl->inst,
panel_cntl->pwrseq_inst);
} else {
dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
dmub_abm_set_pipe(abm, otg_inst,
SET_ABM_PIPE_NORMAL,
panel_cntl->inst,
panel_cntl->pwrseq_inst);
}
}
}
@ -237,9 +249,17 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,

if (abm && panel_cntl) {
if (abm->funcs && abm->funcs->set_pipe_ex) {
abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
abm->funcs->set_pipe_ex(abm,
otg_inst,
SET_ABM_PIPE_NORMAL,
panel_cntl->inst,
panel_cntl->pwrseq_inst);
} else {
dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
dmub_abm_set_pipe(abm,
otg_inst,
SET_ABM_PIPE_NORMAL,
panel_cntl->inst,
panel_cntl->pwrseq_inst);
}
}
}
@ -367,6 +367,10 @@ void dcn30_enable_writeback(
DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\
__func__, wb_info->dwb_pipe_inst,\
wb_info->mpcc_inst);

/* Warmup interface */
dcn30_mmhubbub_warmup(dc, 1, wb_info);

/* Update writeback pipe */
dcn30_set_writeback(dc, wb_info, context);
@ -96,7 +96,8 @@ static void enable_memory_low_power(struct dc *dc)
if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
// Power down VPGs
for (i = 0; i < dc->res_pool->stream_enc_count; i++)
dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
if (dc->res_pool->stream_enc[i]->vpg)
dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
#if defined(CONFIG_DRM_AMD_DC_FP)
for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
@ -989,9 +989,22 @@ static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
struct dccg *dccg = dc->res_pool->dccg;
/* It has been found that when DSCCLK is lower than 16MHz, we will get DCN
* register access hung. When DSCCLK is based on refclk, DSCCLK is always a
* fixed value higher than 16MHz so the issue doesn't occur. When DSCCLK is
* generated by DTO, DSCCLK would be based on 1/3 dispclk. For small timings
* with DSC such as 480p60Hz, the dispclk could be low enough to trigger
* this problem. We are implementing a workaround here to keep using dscclk
* based on fixed value refclk when timing is smaller than 3x16MHz (i.e.
* 48MHz) pixel clock to avoid hitting this problem.
*/
bool should_use_dto_dscclk = (dccg->funcs->set_dto_dscclk != NULL) &&
stream->timing.pix_clk_100hz > 480000;
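The 480000 threshold is the comment's 48 MHz bound in the driver's pix_clk_100hz units (multiples of 100 Hz): 3 x 16 MHz = 48 MHz = 480000 x 100 Hz. Worked numbers, approximate and assuming dispclk tracks the pixel clock as the comment describes:

/*
 * 480p60: pix_clk ~ 25.2 MHz -> 252000 units, below 480000, so a
 *   DTO-based DSCCLK of ~dispclk/3 could fall under the 16 MHz floor;
 *   keep the refclk-based DSCCLK.
 * 1080p60: pix_clk ~ 148.5 MHz -> 1485000 units, above 480000, so
 *   dispclk/3 stays comfortably above 16 MHz and the DTO path is safe.
 */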

ASSERT(dsc);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
@ -1014,12 +1027,16 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)

dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
if (should_use_dto_dscclk)
dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;

ASSERT(odm_dsc);
odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
if (should_use_dto_dscclk)
dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
}
dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
dsc_cfg.pic_width *= opp_cnt;
@ -1039,9 +1056,13 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
OPTC_DSC_DISABLED, 0, 0);

/* disable DSC block */
if (dccg->funcs->set_ref_dscclk)
dccg->funcs->set_ref_dscclk(dccg, pipe_ctx->stream_res.dsc->inst);
dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
ASSERT(odm_pipe->stream_res.dsc);
if (dccg->funcs->set_ref_dscclk)
dccg->funcs->set_ref_dscclk(dccg, odm_pipe->stream_res.dsc->inst);
odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
}
}
@ -1124,6 +1145,10 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
current_pipe_ctx->next_odm_pipe->stream_res.dsc) {
struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc;
struct dccg *dccg = dc->res_pool->dccg;

if (dccg->funcs->set_ref_dscclk)
dccg->funcs->set_ref_dscclk(dccg, dsc->inst);
/* disconnect DSC block from stream */
dsc->funcs->dsc_disconnect(dsc);
}
@ -979,6 +979,8 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
bool hpo_frl_stream_enc_acquired = false;
bool hpo_dp_stream_enc_acquired = false;
int i = 0, j = 0;
int edp_num = 0;
struct dc_link *edp_links[MAX_NUM_EDP] = { NULL };

memset(update_state, 0, sizeof(struct pg_block_update));

@ -1019,10 +1021,24 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,

if (pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;

if (pipe_ctx->stream_res.tg)
update_state->pg_pipe_res_update[PG_OPTC][pipe_ctx->stream_res.tg->inst] = false;
}
/*domain24 controls all the otg, mpc, opp, as long as one otg is still up, avoid enabling OTG PG*/
for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
struct timing_generator *tg = dc->res_pool->timing_generators[i];
if (tg && tg->funcs->is_tg_enabled(tg)) {
update_state->pg_pipe_res_update[PG_OPTC][i] = false;
break;
}
}

dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num == 0 ||
((!edp_links[0] || !edp_links[0]->edp_sink_present) &&
(!edp_links[1] || !edp_links[1]->edp_sink_present))) {
/* eDP does not exist on this config; keep Domain24 powered on. For S0i3 this will be handled in dmubfw */
update_state->pg_pipe_res_update[PG_OPTC][0] = false;
}

}

void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
@ -1156,8 +1172,10 @@ void dcn35_block_power_control(struct dc *dc,
pg_cntl->funcs->dwb_pg_control(pg_cntl, power_on);
}

/* this will need all the clients to unregister optc interrupts; let dmubfw handle this */
if (pg_cntl->funcs->plane_otg_pg_control)
pg_cntl->funcs->plane_otg_pg_control(pg_cntl, power_on);

}

void dcn35_root_clock_control(struct dc *dc,
@ -64,7 +64,8 @@ struct abm_funcs {
bool (*set_pipe_ex)(struct abm *abm,
unsigned int otg_inst,
unsigned int option,
unsigned int panel_inst);
unsigned int panel_inst,
unsigned int pwrseq_inst);
};

#endif
@ -201,6 +201,10 @@ struct dccg_funcs {
struct dccg *dccg,
enum streamclk_source src,
uint32_t otg_inst);
void (*set_dto_dscclk)(
struct dccg *dccg,
uint32_t dsc_inst);
void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
};

#endif //__DAL_DCCG_H__
@ -188,6 +188,10 @@ struct dwbc_funcs {
bool (*is_enabled)(
struct dwbc *dwbc);

void (*set_fc_enable)(
struct dwbc *dwbc,
enum dwb_frame_capture_enable enable);

void (*set_stereo)(
struct dwbc *dwbc,
struct dwb_stereo_params *stereo_params);
@ -56,12 +56,14 @@ struct panel_cntl_funcs {
struct panel_cntl_init_data {
struct dc_context *ctx;
uint32_t inst;
uint32_t pwrseq_inst;
};

struct panel_cntl {
const struct panel_cntl_funcs *funcs;
struct dc_context *ctx;
uint32_t inst;
uint32_t pwrseq_inst;
/* registers setting needs to be saved and restored at InitBacklight */
struct panel_cntl_backlight_registers stored_backlight_registers;
};
@ -776,10 +776,26 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
*/
void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
/* TODO: Move this to HWSS as this is hardware programming sequence not a
* link layer sequence
*/
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
struct dccg *dccg = dc->res_pool->dccg;
/* It has been found that when DSCCLK is lower than 16MHz, we will get DCN
* register access hung. When DSCCLK is based on refclk, DSCCLK is always a
* fixed value higher than 16MHz so the issue doesn't occur. When DSCCLK is
* generated by DTO, DSCCLK would be based on 1/3 dispclk. For small timings
* with DSC such as 480p60Hz, the dispclk could be low enough to trigger
* this problem. We are implementing a workaround here to keep using dscclk
* based on fixed value refclk when timing is smaller than 3x16MHz (i.e.
* 48MHz) pixel clock to avoid hitting this problem.
*/
bool should_use_dto_dscclk = (dccg->funcs->set_dto_dscclk != NULL) &&
stream->timing.pix_clk_100hz > 480000;
DC_LOGGER_INIT(dsc->ctx->logger);

for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
@ -802,11 +818,15 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)

dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
if (should_use_dto_dscclk)
dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;

odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
if (should_use_dto_dscclk)
dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
}
dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
dsc_cfg.pic_width *= opp_cnt;
@ -856,9 +876,14 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
}

/* disable DSC block */
if (dccg->funcs->set_ref_dscclk)
dccg->funcs->set_ref_dscclk(dccg, pipe_ctx->stream_res.dsc->inst);
pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
if (dccg->funcs->set_ref_dscclk)
dccg->funcs->set_ref_dscclk(dccg, odm_pipe->stream_res.dsc->inst);
odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
}
}
}

@ -1057,18 +1082,21 @@ static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps)
uint32_t denominator = 1;

/*
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
* The 1.006 factor (margin 5300ppm + 300ppm ~ 0.6% as per spec) is not
* required when determining PBN/time slot utilization on the link between
* us and the branch, since that overhead is already accounted for in
* the get_pbn_per_slot function.
*
* The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
* common multiplier to render an integer PBN for all link rate/lane
* counts combinations
* calculate
* peak_kbps *= (1006/1000)
* peak_kbps *= (64/54)
* peak_kbps *= 8 convert to bytes
* peak_kbps /= (8 * 1000) convert to bytes
*/

numerator = 64 * PEAK_FACTOR_X1000;
denominator = 54 * 8 * 1000 * 1000;
numerator = 64;
denominator = 54 * 8 * 1000;
kbps *= numerator;
peak_kbps = dc_fixpt_from_fraction(kbps, denominator);
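With the margin factor dropped, the conversion above reduces to PBN = kbps x 64 / (54 x 8 x 1000): dividing by 8 x 1000 turns kilobits per second into megabytes per second, and 64/54 rescales to the MST unit of 54/64 MBps per PBN. A worked example with round numbers (illustrative, not from the patch):

/*
 * kbps = 15750000 (about a 15.75 Gbps stream)
 * PBN  = 15750000 * 64 / (54 * 8 * 1000)
 *      = 1008000000 / 432000
 *      ~ 2333.3 PBN (kept as a fixed-point fraction by
 *        dc_fixpt_from_fraction)
 * The old code additionally multiplied by 1006/1000; that 0.6%
 * margin is now applied once in get_pbn_per_slot instead.
 */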
@ -369,6 +369,30 @@ static enum transmitter translate_encoder_to_transmitter(
}
}

static uint8_t translate_dig_inst_to_pwrseq_inst(struct dc_link *link)
{
uint8_t pwrseq_inst = 0xF;
struct dc_context *dc_ctx = link->dc->ctx;

DC_LOGGER_INIT(dc_ctx->logger);

switch (link->eng_id) {
case ENGINE_ID_DIGA:
pwrseq_inst = 0;
break;
case ENGINE_ID_DIGB:
pwrseq_inst = 1;
break;
default:
DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", link->eng_id);
ASSERT(false);
break;
}

return pwrseq_inst;
}
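translate_dig_inst_to_pwrseq_inst ties the panel power sequencer to the DIG engine the eDP link is mapped to: DIGA drives pwrseq 0, DIGB drives pwrseq 1, and anything else returns 0xF as an explicit invalid marker (with a warning and assert, suggesting only two sequencers are exposed on these parts). Restated as a table:

/* link->eng_id   -> pwrseq_inst                   */
/* ENGINE_ID_DIGA -> 0                             */
/* ENGINE_ID_DIGB -> 1                             */
/* anything else  -> 0xF (invalid; warn + assert)  */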

static void link_destruct(struct dc_link *link)
{
int i;
@ -596,24 +620,6 @@ static bool construct_phy(struct dc_link *link,
link->ddc_hw_inst =
dal_ddc_get_line(get_ddc_pin(link->ddc));

if (link->dc->res_pool->funcs->panel_cntl_create &&
(link->link_id.id == CONNECTOR_ID_EDP ||
link->link_id.id == CONNECTOR_ID_LVDS)) {
panel_cntl_init_data.ctx = dc_ctx;
panel_cntl_init_data.inst =
panel_cntl_init_data.ctx->dc_edp_id_count;
link->panel_cntl =
link->dc->res_pool->funcs->panel_cntl_create(
&panel_cntl_init_data);
panel_cntl_init_data.ctx->dc_edp_id_count++;

if (link->panel_cntl == NULL) {
DC_ERROR("Failed to create link panel_cntl!\n");
goto panel_cntl_create_fail;
}
}

enc_init_data.ctx = dc_ctx;
bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0,
&enc_init_data.encoder);
@ -644,6 +650,23 @@ static bool construct_phy(struct dc_link *link,
link->dc->res_pool->dig_link_enc_count++;

link->link_enc_hw_inst = link->link_enc->transmitter;

if (link->dc->res_pool->funcs->panel_cntl_create &&
(link->link_id.id == CONNECTOR_ID_EDP ||
link->link_id.id == CONNECTOR_ID_LVDS)) {
panel_cntl_init_data.ctx = dc_ctx;
panel_cntl_init_data.inst = panel_cntl_init_data.ctx->dc_edp_id_count;
panel_cntl_init_data.pwrseq_inst = translate_dig_inst_to_pwrseq_inst(link);
link->panel_cntl =
link->dc->res_pool->funcs->panel_cntl_create(
&panel_cntl_init_data);
panel_cntl_init_data.ctx->dc_edp_id_count++;

if (link->panel_cntl == NULL) {
DC_ERROR("Failed to create link panel_cntl!\n");
goto panel_cntl_create_fail;
}
}
for (i = 0; i < 4; i++) {
if (bp_funcs->get_device_tag(dc_ctx->dc_bios,
link->link_id, i,
@ -190,9 +190,6 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
/*AMD Replay version reuse DP_PSR_ERROR_STATUS for REPLAY_ERROR status.*/
union psr_error_status replay_error_status;

if (link->replay_settings.config.force_disable_desync_error_check)
return;

if (!link->replay_settings.replay_feature_enabled)
return;

@ -210,9 +207,6 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
&replay_error_status.raw,
sizeof(replay_error_status.raw));

if (replay_configuration.bits.DESYNC_ERROR_STATUS)
link->replay_settings.config.received_desync_error_hpd = 1;

link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR =
replay_error_status.bits.LINK_CRC_ERROR;
link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR =
@ -225,6 +219,12 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR) {
bool allow_active;

if (link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR)
link->replay_settings.config.received_desync_error_hpd = 1;

if (link->replay_settings.config.force_disable_desync_error_check)
return;

/* Acknowledge and clear configuration bits */
dm_helpers_dp_write_dpcd(
link->ctx,
@ -265,7 +265,7 @@ void dp_handle_link_loss(struct dc_link *link)

for (i = count - 1; i >= 0; i--) {
// Always use max settings here for DP 1.4a LL Compliance CTS
if (link->is_automated) {
if (link->skip_fallback_on_link_loss) {
pipes[i]->link_config.dp_link_settings.lane_count =
link->verified_link_cap.lane_count;
pipes[i]->link_config.dp_link_settings.link_rate =
@ -404,7 +404,9 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,

if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
// Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC
link->is_automated = true;
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
link->skip_fallback_on_link_loss = true;

device_service_clear.bits.AUTOMATED_TEST = 1;
core_link_write_dpcd(
link,
@ -811,7 +811,7 @@ static enum link_training_result dpia_training_eq_transparent(
/* Take into consideration corner case for DP 1.4a LL Compliance CTS as USB4
* has to share encoders unlike DP and USBC
*/
if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->is_automated && retries_eq)) {
if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->skip_fallback_on_link_loss && retries_eq)) {
result = LINK_TRAINING_SUCCESS;
break;
}
@ -1037,7 +1037,7 @@ enum link_training_result dpia_perform_link_training(
*/
if (result == LINK_TRAINING_SUCCESS) {
fsleep(5000);
if (!link->is_automated)
if (!link->skip_fallback_on_link_loss)
result = dp_check_link_loss_status(link, &lt_settings);
} else if (result == LINK_TRAINING_ABORT)
dpia_training_abort(link, &lt_settings, repeater_id);
@ -287,8 +287,8 @@ bool set_default_brightness_aux(struct dc_link *link)
if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
if (!read_default_bl_aux(link, &default_backlight))
default_backlight = 150000;
// if > 5000, it might be wrong readback
if (default_backlight > 5000000)
// if < 1 nits or > 5000, it might be wrong readback
if (default_backlight < 1000 || default_backlight > 5000000)
default_backlight = 150000;

return edp_set_backlight_level_nits(link, true,
|
||||
|
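
For context, a minimal standalone sketch of the tightened sanity check above. The bounds and the 150-nit fallback come from the hunk (values are in millinits, so 150000 = 150 nits); the readback helper is stubbed out here as an assumption, since the real one goes over DPCD AUX:

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: stands in for the driver's AUX readback of the OLED
 * default backlight; hard-wired to an implausible value for the demo. */
static bool read_default_bl_aux_stub(uint32_t *millinits)
{
	*millinits = 0;
	return true;
}

/* Values are in millinits: 1000 == 1 nit, 150000 == 150 nits. */
static uint32_t sanitize_default_backlight(void)
{
	uint32_t default_backlight;

	if (!read_default_bl_aux_stub(&default_backlight))
		default_backlight = 150000;

	/* Readbacks below 1 nit or above 5000 nits are treated as bogus
	 * and replaced with the 150-nit default, per the fix above. */
	if (default_backlight < 1000 || default_backlight > 5000000)
		default_backlight = 150000;

	return default_backlight;
}
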
@ -1250,7 +1250,10 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
        /* Store first available for MST second display
         * in daisy chain use case
         */
        j = i;

        if (pool->stream_enc[i]->id != ENGINE_ID_VIRTUAL)
            j = i;

        if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id ==
                link->link_enc->preferred_engine)
            return pool->stream_enc[i];

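A hedged reading of what this hunk changes: the fallback index is now only remembered for non-virtual encoders, so an MST daisy-chained display can no longer be handed a virtual engine. A simplified stand-in, with made-up types and names rather than the driver's:

#include <stddef.h>

enum engine_id { ENGINE_ID_VIRTUAL = -1, ENGINE_ID_DIGA = 0, ENGINE_ID_DIGB = 1 };

struct stream_enc { enum engine_id id; };

/* Simplified sketch of the selection loop: return the preferred engine on
 * an exact match, otherwise fall back to a non-virtual free encoder. */
static struct stream_enc *pick_stream_enc(struct stream_enc **free_encs,
					  size_t n, enum engine_id preferred)
{
	struct stream_enc *fallback = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (free_encs[i]->id != ENGINE_ID_VIRTUAL)
			fallback = free_encs[i]; /* never fall back to a virtual encoder */

		if (free_encs[i]->id == preferred)
			return free_encs[i];
	}
	return fallback;
}
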
@ -1682,6 +1682,7 @@ noinline bool dcn30_internal_validate_bw(
     * We don't actually support prefetch mode 2, so require that we
     * at least support prefetch mode 1.
     */
    context->bw_ctx.dml.validate_max_state = fast_validate;
    context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
        dm_allow_self_refresh;

@ -1691,6 +1692,7 @@ noinline bool dcn30_internal_validate_bw(
            memset(merge, 0, sizeof(merge));
            vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
        }
        context->bw_ctx.dml.validate_max_state = false;
    }

    dml_log_mode_support_params(&context->bw_ctx.dml);

@ -193,6 +193,8 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context);

bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int vlevel);

void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);

/* definitions for run time init of reg offsets */

/* CLK SRC */

@ -736,7 +736,7 @@ static const struct dc_debug_options debug_defaults_drv = {
        .i2c = true,
        .dmcu = false, // Previously known to cause a hang on S3 cycles if enabled
        .dscl = true,
        .cm = false,
        .cm = true,
        .mpc = true,
        .optc = true,
        .vpg = true,

@ -3498,6 +3498,16 @@ struct dmub_cmd_abm_set_pipe_data {
     * TODO: Remove.
     */
    uint8_t ramping_boundary;

    /**
     * PwrSeq HW Instance.
     */
    uint8_t pwrseq_inst;

    /**
     * Explicit padding to 4 byte boundary.
     */
    uint8_t pad[3];
};

/**
@ -3878,7 +3888,7 @@ enum dmub_cmd_panel_cntl_type {
 * struct dmub_cmd_panel_cntl_data - Panel control data.
 */
struct dmub_cmd_panel_cntl_data {
    uint32_t inst; /**< panel instance */
    uint32_t pwrseq_inst; /**< pwrseq instance */
    uint32_t current_backlight; /* in/out */
    uint32_t bl_pwm_cntl; /* in/out */
    uint32_t bl_pwm_period_cntl; /* in/out */
@ -3937,7 +3947,7 @@ struct dmub_cmd_lvtma_control_data {
    uint8_t uc_pwr_action; /**< LVTMA_ACTION */
    uint8_t bypass_panel_control_wait;
    uint8_t reserved_0[2]; /**< For future use */
    uint8_t panel_inst; /**< LVTMA control instance */
    uint8_t pwrseq_inst; /**< LVTMA control instance */
    uint8_t reserved_1[3]; /**< For future use */
};

@ -64,7 +64,7 @@

/* Default scratch mem size. */
#define DMUB_SCRATCH_MEM_SIZE (256)
#define DMUB_SCRATCH_MEM_SIZE (1024)

/* Number of windows in use. */
#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)

@ -257,6 +257,7 @@ enum DC_DEBUG_MASK {
    DC_DISABLE_MPO = 0x40,
    DC_DISABLE_REPLAY = 0x50,
    DC_ENABLE_DPIA_TRACE = 0x80,
    DC_ENABLE_DML2 = 0x100,
};

enum amd_dpm_forced_level;

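These values are consumed as a bitmask (in amdgpu the mask is supplied via the amdgpu.dcdebugmask module parameter). A hedged sketch of the usual check, with the flag redefined locally so the fragment compiles on its own:

#include <stdbool.h>

/* Local copy of the enum value above, so this sketch is self-contained. */
#define DC_ENABLE_DML2 0x100

/* In the driver the mask originates from amdgpu.dcdebugmask. */
static bool dml2_requested(int dc_debug_mask)
{
	return (dc_debug_mask & DC_ENABLE_DML2) != 0;
}
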
drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_offset.h (new file, 102 lines)
@ -0,0 +1,102 @@
/*
 * Copyright (C) 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _smuio_10_0_2_OFFSET_HEADER
#define _smuio_10_0_2_OFFSET_HEADER

// addressBlock: smuio_smuio_misc_SmuSmuioDec
// base address: 0x5a000
#define mmSMUIO_MCM_CONFIG                      0x0023
#define mmSMUIO_MCM_CONFIG_BASE_IDX             0
#define mmIP_DISCOVERY_VERSION                  0x0000
#define mmIP_DISCOVERY_VERSION_BASE_IDX         1
#define mmIO_SMUIO_PINSTRAP                     0x01b1
#define mmIO_SMUIO_PINSTRAP_BASE_IDX            1
#define mmSCRATCH_REGISTER0                     0x01b2
#define mmSCRATCH_REGISTER0_BASE_IDX            1
#define mmSCRATCH_REGISTER1                     0x01b3
#define mmSCRATCH_REGISTER1_BASE_IDX            1
#define mmSCRATCH_REGISTER2                     0x01b4
#define mmSCRATCH_REGISTER2_BASE_IDX            1
#define mmSCRATCH_REGISTER3                     0x01b5
#define mmSCRATCH_REGISTER3_BASE_IDX            1
#define mmSCRATCH_REGISTER4                     0x01b6
#define mmSCRATCH_REGISTER4_BASE_IDX            1
#define mmSCRATCH_REGISTER5                     0x01b7
#define mmSCRATCH_REGISTER5_BASE_IDX            1
#define mmSCRATCH_REGISTER6                     0x01b8
#define mmSCRATCH_REGISTER6_BASE_IDX            1
#define mmSCRATCH_REGISTER7                     0x01b9
#define mmSCRATCH_REGISTER7_BASE_IDX            1

// addressBlock: smuio_smuio_reset_SmuSmuioDec
// base address: 0x5a300
#define mmSMUIO_MP_RESET_INTR                   0x00c1
#define mmSMUIO_MP_RESET_INTR_BASE_IDX          0
#define mmSMUIO_SOC_HALT                        0x00c2
#define mmSMUIO_SOC_HALT_BASE_IDX               0
#define mmSMUIO_GFX_MISC_CNTL                   0x00c8
#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX          0

// addressBlock: smuio_smuio_ccxctrl_SmuSmuioDec
// base address: 0x5a000
#define mmPWROK_REFCLK_GAP_CYCLES               0x0001
#define mmPWROK_REFCLK_GAP_CYCLES_BASE_IDX      1
#define mmGOLDEN_TSC_INCREMENT_UPPER            0x0004
#define mmGOLDEN_TSC_INCREMENT_UPPER_BASE_IDX   1
#define mmGOLDEN_TSC_INCREMENT_LOWER            0x0005
#define mmGOLDEN_TSC_INCREMENT_LOWER_BASE_IDX   1
#define mmGOLDEN_TSC_COUNT_UPPER                0x0025
#define mmGOLDEN_TSC_COUNT_UPPER_BASE_IDX       1
#define mmGOLDEN_TSC_COUNT_LOWER                0x0026
#define mmGOLDEN_TSC_COUNT_LOWER_BASE_IDX       1
#define mmGFX_GOLDEN_TSC_SHADOW_UPPER           0x0029
#define mmGFX_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX  1
#define mmGFX_GOLDEN_TSC_SHADOW_LOWER           0x002a
#define mmGFX_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX  1
#define mmSOC_GOLDEN_TSC_SHADOW_UPPER           0x002b
#define mmSOC_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX  1
#define mmSOC_GOLDEN_TSC_SHADOW_LOWER           0x002c
#define mmSOC_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX  1
#define mmSOC_GAP_PWROK                         0x002d
#define mmSOC_GAP_PWROK_BASE_IDX                1

// addressBlock: smuio_smuio_swtimer_SmuSmuioDec
// base address: 0x5ac40
#define mmPWR_VIRT_RESET_REQ                    0x0110
#define mmPWR_VIRT_RESET_REQ_BASE_IDX           1
#define mmPWR_DISP_TIMER_CONTROL                0x0111
#define mmPWR_DISP_TIMER_CONTROL_BASE_IDX       1
#define mmPWR_DISP_TIMER2_CONTROL               0x0113
#define mmPWR_DISP_TIMER2_CONTROL_BASE_IDX      1
#define mmPWR_DISP_TIMER_GLOBAL_CONTROL         0x0115
#define mmPWR_DISP_TIMER_GLOBAL_CONTROL_BASE_IDX 1
#define mmPWR_IH_CONTROL                        0x0116
#define mmPWR_IH_CONTROL_BASE_IDX               1

// addressBlock: smuio_smuio_svi0_SmuSmuioDec
// base address: 0x6f000
#define mmSMUSVI0_TEL_PLANE0                    0x520e
#define mmSMUSVI0_TEL_PLANE0_BASE_IDX           1
#define mmSMUSVI0_PLANE0_CURRENTVID             0x5217
#define mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX    1

#endif

drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_sh_mask.h (new file, 184 lines)
@ -0,0 +1,184 @@
/*
 * Copyright (C) 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _smuio_10_0_2_SH_MASK_HEADER
#define _smuio_10_0_2_SH_MASK_HEADER

// addressBlock: smuio_smuio_misc_SmuSmuioDec
//SMUIO_MCM_CONFIG
#define SMUIO_MCM_CONFIG__DIE_ID__SHIFT                         0x0
#define SMUIO_MCM_CONFIG__PKG_TYPE__SHIFT                       0x2
#define SMUIO_MCM_CONFIG__SOCKET_ID__SHIFT                      0x5
#define SMUIO_MCM_CONFIG__PKG_SUBTYPE__SHIFT                    0x6
#define SMUIO_MCM_CONFIG__CONSOLE_K__SHIFT                      0x10
#define SMUIO_MCM_CONFIG__CONSOLE_A__SHIFT                      0x11
#define SMUIO_MCM_CONFIG__DIE_ID_MASK                           0x00000003L
#define SMUIO_MCM_CONFIG__PKG_TYPE_MASK                         0x0000001CL
#define SMUIO_MCM_CONFIG__SOCKET_ID_MASK                        0x00000020L
#define SMUIO_MCM_CONFIG__PKG_SUBTYPE_MASK                      0x000000C0L
#define SMUIO_MCM_CONFIG__CONSOLE_K_MASK                        0x00010000L
#define SMUIO_MCM_CONFIG__CONSOLE_A_MASK                        0x00020000L
//IP_DISCOVERY_VERSION
#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION__SHIFT      0x0
#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION_MASK        0xFFFFFFFFL
//IO_SMUIO_PINSTRAP
#define IO_SMUIO_PINSTRAP__AUD_PORT_CONN__SHIFT                 0x0
#define IO_SMUIO_PINSTRAP__AUD__SHIFT                           0x3
#define IO_SMUIO_PINSTRAP__AUD_PORT_CONN_MASK                   0x00000007L
#define IO_SMUIO_PINSTRAP__AUD_MASK                             0x00000018L
//SCRATCH_REGISTER0
#define SCRATCH_REGISTER0__ScratchPad0__SHIFT                   0x0
#define SCRATCH_REGISTER0__ScratchPad0_MASK                     0xFFFFFFFFL
//SCRATCH_REGISTER1
#define SCRATCH_REGISTER1__ScratchPad1__SHIFT                   0x0
#define SCRATCH_REGISTER1__ScratchPad1_MASK                     0xFFFFFFFFL
//SCRATCH_REGISTER2
#define SCRATCH_REGISTER2__ScratchPad2__SHIFT                   0x0
#define SCRATCH_REGISTER2__ScratchPad2_MASK                     0xFFFFFFFFL
//SCRATCH_REGISTER3
#define SCRATCH_REGISTER3__ScratchPad3__SHIFT                   0x0
#define SCRATCH_REGISTER3__ScratchPad3_MASK                     0xFFFFFFFFL
//SCRATCH_REGISTER4
#define SCRATCH_REGISTER4__ScratchPad4__SHIFT                   0x0
#define SCRATCH_REGISTER4__ScratchPad4_MASK                     0xFFFFFFFFL
//SCRATCH_REGISTER5
#define SCRATCH_REGISTER5__ScratchPad5__SHIFT                   0x0
#define SCRATCH_REGISTER5__ScratchPad5_MASK                     0xFFFFFFFFL
//SCRATCH_REGISTER6
#define SCRATCH_REGISTER6__ScratchPad6__SHIFT                   0x0
#define SCRATCH_REGISTER6__ScratchPad6_MASK                     0xFFFFFFFFL
//SCRATCH_REGISTER7
#define SCRATCH_REGISTER7__ScratchPad7__SHIFT                   0x0
#define SCRATCH_REGISTER7__ScratchPad7_MASK                     0xFFFFFFFFL

// addressBlock: smuio_smuio_reset_SmuSmuioDec
//SMUIO_MP_RESET_INTR
#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR__SHIFT        0x0
#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR_MASK          0x00000001L
//SMUIO_SOC_HALT
#define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN__SHIFT              0x2
#define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN__SHIFT             0x3
#define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN_MASK                0x00000004L
#define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN_MASK               0x00000008L
//SMUIO_GFX_MISC_CNTL
#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT     0x0
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT          0x1
#define SMUIO_GFX_MISC_CNTL__PWR_GFX_DLDO_CLK_SWITCH__SHIFT    0x3
#define SMUIO_GFX_MISC_CNTL__PWR_GFX_RLC_CGPG_EN__SHIFT        0x4
#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK       0x00000001L
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK            0x00000006L
#define SMUIO_GFX_MISC_CNTL__PWR_GFX_DLDO_CLK_SWITCH_MASK      0x00000008L
#define SMUIO_GFX_MISC_CNTL__PWR_GFX_RLC_CGPG_EN_MASK          0x00000010L

// addressBlock: smuio_smuio_ccxctrl_SmuSmuioDec
//PWROK_REFCLK_GAP_CYCLES
#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles__SHIFT  0x0
#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles__SHIFT 0x8
#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles_MASK    0x000000FFL
#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles_MASK   0x0000FF00L
//GOLDEN_TSC_INCREMENT_UPPER
#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper__SHIFT 0x0
#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper_MASK   0x00FFFFFFL
//GOLDEN_TSC_INCREMENT_LOWER
#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower__SHIFT 0x0
#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower_MASK   0xFFFFFFFFL
//GOLDEN_TSC_COUNT_UPPER
#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper__SHIFT         0x0
#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper_MASK           0x00FFFFFFL
//GOLDEN_TSC_COUNT_LOWER
#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower__SHIFT         0x0
#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower_MASK           0xFFFFFFFFL
//GFX_GOLDEN_TSC_SHADOW_UPPER
#define GFX_GOLDEN_TSC_SHADOW_UPPER__GfxGoldenTscShadowUpper__SHIFT 0x0
#define GFX_GOLDEN_TSC_SHADOW_UPPER__GfxGoldenTscShadowUpper_MASK   0x00FFFFFFL
//GFX_GOLDEN_TSC_SHADOW_LOWER
#define GFX_GOLDEN_TSC_SHADOW_LOWER__GfxGoldenTscShadowLower__SHIFT 0x0
#define GFX_GOLDEN_TSC_SHADOW_LOWER__GfxGoldenTscShadowLower_MASK   0xFFFFFFFFL
//SOC_GOLDEN_TSC_SHADOW_UPPER
#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper__SHIFT 0x0
#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper_MASK   0x00FFFFFFL
//SOC_GOLDEN_TSC_SHADOW_LOWER
#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower__SHIFT 0x0
#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower_MASK   0xFFFFFFFFL
//SOC_GAP_PWROK
#define SOC_GAP_PWROK__soc_gap_pwrok__SHIFT                        0x0
#define SOC_GAP_PWROK__soc_gap_pwrok_MASK                          0x00000001L

// addressBlock: smuio_smuio_swtimer_SmuSmuioDec
//PWR_VIRT_RESET_REQ
#define PWR_VIRT_RESET_REQ__VF_FLR__SHIFT                          0x0
#define PWR_VIRT_RESET_REQ__PF_FLR__SHIFT                          0x1f
#define PWR_VIRT_RESET_REQ__VF_FLR_MASK                            0x7FFFFFFFL
#define PWR_VIRT_RESET_REQ__PF_FLR_MASK                            0x80000000L
//PWR_DISP_TIMER_CONTROL
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT        0x0
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT       0x19
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT      0x1a
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT         0x1b
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT      0x1c
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT         0x1d
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT         0x1e
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK          0x01FFFFFFL
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK         0x02000000L
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK        0x04000000L
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK           0x08000000L
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK        0x10000000L
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK           0x20000000L
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK           0x40000000L
//PWR_DISP_TIMER2_CONTROL
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT       0x0
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT      0x19
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT     0x1a
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT        0x1b
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT     0x1c
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT        0x1d
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT        0x1e
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK         0x01FFFFFFL
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK        0x02000000L
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK       0x04000000L
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK          0x08000000L
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK       0x10000000L
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK          0x20000000L
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK          0x40000000L
//PWR_DISP_TIMER_GLOBAL_CONTROL
#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0
#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN__SHIFT    0xa
#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH_MASK   0x000003FFL
#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN_MASK      0x00000400L
//PWR_IH_CONTROL
#define PWR_IH_CONTROL__MAX_CREDIT__SHIFT                          0x0
#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK__SHIFT             0x5
#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK__SHIFT            0x6
#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN__SHIFT                  0x1f
#define PWR_IH_CONTROL__MAX_CREDIT_MASK                            0x0000001FL
#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK_MASK               0x00000020L
#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK_MASK              0x00000040L
#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN_MASK                    0x80000000L

// addressBlock: smuio_smuio_svi0_SmuSmuioDec
//SMUSVI0_TEL_PLANE0
#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_IDDCOR__SHIFT              0x0
#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT              0x10
#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_IDDCOR_MASK                0x000000FFL
#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK                0x01FF0000L
//SMUSVI0_PLANE0_CURRENTVID
#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT  0x18
#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK    0xFF000000L

#endif
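
To make the SHIFT/MASK pairs concrete, a small hedged example of how such definitions are typically used to decode register fields. The constants mirror the ones defined above (copied locally so the example compiles standalone); the register value is a made-up literal, not a real readback:

#include <stdint.h>
#include <stdio.h>

#define SMUIO_MCM_CONFIG__DIE_ID__SHIFT    0x0
#define SMUIO_MCM_CONFIG__SOCKET_ID__SHIFT 0x5
#define SMUIO_MCM_CONFIG__DIE_ID_MASK      0x00000003L
#define SMUIO_MCM_CONFIG__SOCKET_ID_MASK   0x00000020L

int main(void)
{
	/* Pretend this was read from mmSMUIO_MCM_CONFIG; the literal is invented. */
	uint32_t val = 0x00000022;

	/* Mask out the field, then shift it down to bit 0. */
	uint32_t die_id = (val & SMUIO_MCM_CONFIG__DIE_ID_MASK) >> SMUIO_MCM_CONFIG__DIE_ID__SHIFT;
	uint32_t socket = (val & SMUIO_MCM_CONFIG__SOCKET_ID_MASK) >> SMUIO_MCM_CONFIG__SOCKET_ID__SHIFT;

	printf("die_id=%u socket_id=%u\n", die_id, socket); /* prints die_id=2 socket_id=1 */
	return 0;
}
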
@ -232,6 +232,7 @@ union MESAPI_SET_HW_RESOURCES {
        };
        uint32_t oversubscription_timer;
        uint64_t doorbell_info;
        uint64_t event_intr_history_gpu_mc_ptr;
    };

    uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];

@ -2238,10 +2238,10 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
    } else if (DEVICE_ATTR_IS(xgmi_plpd_policy)) {
        if (amdgpu_dpm_get_xgmi_plpd_mode(adev, NULL) == XGMI_PLPD_NONE)
            *states = ATTR_STATE_UNSUPPORTED;
    } else if (DEVICE_ATTR_IS(pp_dpm_mclk_od)) {
    } else if (DEVICE_ATTR_IS(pp_mclk_od)) {
        if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
            *states = ATTR_STATE_UNSUPPORTED;
    } else if (DEVICE_ATTR_IS(pp_dpm_sclk_od)) {
    } else if (DEVICE_ATTR_IS(pp_sclk_od)) {
        if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP)
            *states = ATTR_STATE_UNSUPPORTED;
    } else if (DEVICE_ATTR_IS(apu_thermal_cap)) {

@ -687,11 +687,16 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
    if (radeon_crtc == NULL)
        return;

    radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
    if (!radeon_crtc->flip_queue) {
        kfree(radeon_crtc);
        return;
    }

    drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);

    drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
    radeon_crtc->crtc_id = index;
    radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
    rdev->mode_info.crtcs[index] = radeon_crtc;

    if (rdev->family >= CHIP_BONAIRE) {

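The fix above does two things: it checks alloc_workqueue() for failure (previously unchecked) and moves the allocation before the CRTC is registered, so on failure the only cleanup needed is freeing the not-yet-published object. A minimal sketch of that pattern, with illustrative names rather than radeon's:

#include <stdlib.h>

struct ctx { int id; void *queue; };

/* Stand-in for alloc_workqueue(): any allocation that can fail. */
static void *alloc_queue(void) { return malloc(64); }

/* Allocate everything that can fail *before* publishing the object
 * anywhere; a failure then only requires freeing what we built so far. */
static struct ctx *ctx_create(int id)
{
	struct ctx *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;

	c->queue = alloc_queue();
	if (!c->queue) {
		free(c);  /* nothing else references c yet */
		return NULL;
	}

	c->id = id;
	return c;     /* safe to register/publish from here on */
}
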
@ -1493,8 +1493,10 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
        non_clock_array_index = power_state->v2.nonClockInfoIndex;
        non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
            &non_clock_info_array->nonClockInfo[non_clock_array_index];
        if (!rdev->pm.power_state[i].clock_info)
        if (!rdev->pm.power_state[i].clock_info) {
            kfree(rdev->pm.dpm.ps);
            return -EINVAL;
        }
        ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
        if (ps == NULL) {
            kfree(rdev->pm.dpm.ps);

@ -1726,8 +1726,10 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
        non_clock_array_index = power_state->v2.nonClockInfoIndex;
        non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
            &non_clock_info_array->nonClockInfo[non_clock_array_index];
        if (!rdev->pm.power_state[i].clock_info)
        if (!rdev->pm.power_state[i].clock_info) {
            kfree(rdev->pm.dpm.ps);
            return -EINVAL;
        }
        ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
        if (ps == NULL) {
            kfree(rdev->pm.dpm.ps);

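Both radeon hunks apply the same leak fix: the early `return -EINVAL` previously dropped the `rdev->pm.dpm.ps` array allocated earlier in the function, so the fix frees it on that path as well. A hedged, standalone sketch of the pattern (names are illustrative, with `states` playing the role of `rdev->pm.dpm.ps`):

#include <errno.h>
#include <stdlib.h>

struct state { int clock_info; };

/* Every exit after the allocation either hands ownership to the caller
 * or frees the array; the mid-loop error path no longer leaks it. */
static int parse_states(const struct state *in, size_t n, struct state **out)
{
	struct state *states = calloc(n, sizeof(*states));
	size_t i;

	if (!states)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		if (!in[i].clock_info) {
			free(states);  /* previously leaked on this path */
			return -EINVAL;
		}
		states[i] = in[i];
	}

	*out = states;
	return 0;
}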