Merge drm/drm-next into drm-misc-next

Daniel needs a few commits from drm-next.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Maxime Ripard committed 2020-03-04 08:56:28 +01:00
362 changed files with 21333 additions and 13742 deletions


@@ -8424,7 +8424,7 @@ M: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
M: Rodrigo Vivi <rodrigo.vivi@intel.com> M: Rodrigo Vivi <rodrigo.vivi@intel.com>
L: intel-gfx@lists.freedesktop.org L: intel-gfx@lists.freedesktop.org
W: https://01.org/linuxgraphics/ W: https://01.org/linuxgraphics/
B: https://01.org/linuxgraphics/documentation/how-report-bugs B: https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
C: irc://chat.freenode.net/intel-gfx C: irc://chat.freenode.net/intel-gfx
Q: http://patchwork.freedesktop.org/project/intel-gfx/ Q: http://patchwork.freedesktop.org/project/intel-gfx/
T: git git://anongit.freedesktop.org/drm-intel T: git git://anongit.freedesktop.org/drm-intel


@@ -969,6 +969,7 @@ struct amdgpu_device {
int pstate; int pstate;
/* enable runtime pm on the device */ /* enable runtime pm on the device */
bool runpm; bool runpm;
bool in_runpm;
bool pm_sysfs_en; bool pm_sysfs_en;
bool ucode_sysfs_en; bool ucode_sysfs_en;


@@ -126,7 +126,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
/* this is going to have a few of the MSBs set that we need to /* this is going to have a few of the MSBs set that we need to
* clear * clear
*/ */
bitmap_complement(gpu_resources.queue_bitmap, bitmap_complement(gpu_resources.cp_queue_bitmap,
adev->gfx.mec.queue_bitmap, adev->gfx.mec.queue_bitmap,
KGD_MAX_QUEUES); KGD_MAX_QUEUES);
@@ -137,7 +137,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
* adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_pipe_per_mec
* adev->gfx.mec.num_queue_per_pipe; * adev->gfx.mec.num_queue_per_pipe;
for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i) for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
clear_bit(i, gpu_resources.queue_bitmap); clear_bit(i, gpu_resources.cp_queue_bitmap);
amdgpu_doorbell_get_kfd_info(adev, amdgpu_doorbell_get_kfd_info(adev,
&gpu_resources.doorbell_physical_address, &gpu_resources.doorbell_physical_address,
@@ -178,18 +178,18 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry); kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
} }
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev) void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
{ {
if (adev->kfd.dev) if (adev->kfd.dev)
kgd2kfd_suspend(adev->kfd.dev); kgd2kfd_suspend(adev->kfd.dev, run_pm);
} }
int amdgpu_amdkfd_resume(struct amdgpu_device *adev) int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{ {
int r = 0; int r = 0;
if (adev->kfd.dev) if (adev->kfd.dev)
r = kgd2kfd_resume(adev->kfd.dev); r = kgd2kfd_resume(adev->kfd.dev, run_pm);
return r; return r;
} }
@@ -402,7 +402,7 @@ void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
if (amdgpu_sriov_vf(adev)) if (amdgpu_sriov_vf(adev))
mem_info->mem_clk_max = adev->clock.default_mclk / 100; mem_info->mem_clk_max = adev->clock.default_mclk / 100;
else if (adev->powerplay.pp_funcs) { else if (adev->pm.dpm_enabled) {
if (amdgpu_emu_mode == 1) if (amdgpu_emu_mode == 1)
mem_info->mem_clk_max = 0; mem_info->mem_clk_max = 0;
else else
@@ -427,7 +427,7 @@ uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
/* the sclk is in quantas of 10kHz */ /* the sclk is in quantas of 10kHz */
if (amdgpu_sriov_vf(adev)) if (amdgpu_sriov_vf(adev))
return adev->clock.default_sclk / 100; return adev->clock.default_sclk / 100;
else if (adev->powerplay.pp_funcs) else if (adev->pm.dpm_enabled)
return amdgpu_dpm_get_sclk(adev, false) / 100; return amdgpu_dpm_get_sclk(adev, false) / 100;
else else
return 100; return 100;
@@ -525,6 +525,14 @@ uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
return adev->gmc.xgmi.hive_id; return adev->gmc.xgmi.hive_id;
} }
uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
return adev->unique_id;
}
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src) uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
{ {
struct amdgpu_device *peer_adev = (struct amdgpu_device *)src; struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
@@ -647,13 +655,9 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid) int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
uint32_t flush_type = 0; const uint32_t flush_type = 0;
bool all_hub = false; bool all_hub = false;
if (adev->gmc.xgmi.num_physical_nodes &&
adev->asic_type == CHIP_VEGA20)
flush_type = 2;
if (adev->family == AMDGPU_FAMILY_AI) if (adev->family == AMDGPU_FAMILY_AI)
all_hub = true; all_hub = true;
@@ -677,6 +681,11 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{ {
} }
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
return 0;
}
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
struct amdgpu_vm *vm) struct amdgpu_vm *vm)
{ {
@@ -713,11 +722,11 @@ void kgd2kfd_exit(void)
{ {
} }
void kgd2kfd_suspend(struct kfd_dev *kfd) void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{ {
} }
int kgd2kfd_resume(struct kfd_dev *kfd) int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{ {
return 0; return 0;
} }


@@ -96,6 +96,7 @@ struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm); struct mm_struct *mm);
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm); bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f); struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
struct amdkfd_process_info { struct amdkfd_process_info {
/* List head of all VMs that belong to a KFD process */ /* List head of all VMs that belong to a KFD process */
@@ -122,8 +123,8 @@ struct amdkfd_process_info {
int amdgpu_amdkfd_init(void); int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void); void amdgpu_amdkfd_fini(void);
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev); void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev); int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry); const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev); void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
@@ -171,6 +172,7 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
uint32_t *flags); uint32_t *flags);
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd); uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd); uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd); uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd); uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src); uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
@@ -249,8 +251,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
struct drm_device *ddev, struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources); const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd); void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd); void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_resume(struct kfd_dev *kfd); int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd); int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd); int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry); void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);


@@ -808,4 +808,5 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.get_tile_config = amdgpu_amdkfd_get_tile_config, .get_tile_config = amdgpu_amdkfd_get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base, .set_vm_context_page_table_base = set_vm_context_page_table_base,
.get_hive_id = amdgpu_amdkfd_get_hive_id, .get_hive_id = amdgpu_amdkfd_get_hive_id,
.get_unique_id = amdgpu_amdkfd_get_unique_id,
}; };


@@ -739,4 +739,5 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.get_tile_config = kgd_gfx_v9_get_tile_config, .get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.get_hive_id = amdgpu_amdkfd_get_hive_id, .get_hive_id = amdgpu_amdkfd_get_hive_id,
.get_unique_id = amdgpu_amdkfd_get_unique_id,
}; };


@@ -276,6 +276,42 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
return 0; return 0;
} }
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
struct amdgpu_bo *root = bo;
struct amdgpu_vm_bo_base *vm_bo;
struct amdgpu_vm *vm;
struct amdkfd_process_info *info;
struct amdgpu_amdkfd_fence *ef;
int ret;
/* we can always get vm_bo from root PD bo.*/
while (root->parent)
root = root->parent;
vm_bo = root->vm_bo;
if (!vm_bo)
return 0;
vm = vm_bo->vm;
if (!vm)
return 0;
info = vm->process_info;
if (!info || !info->eviction_fence)
return 0;
ef = container_of(dma_fence_get(&info->eviction_fence->base),
struct amdgpu_amdkfd_fence, base);
BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
dma_resv_unlock(bo->tbo.base.resv);
dma_fence_put(&ef->base);
return ret;
}
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain, static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
bool wait) bool wait)
{ {
@@ -847,9 +883,9 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
vm_list_node) { vm_list_node) {
struct amdgpu_bo *pd = peer_vm->root.base.bo; struct amdgpu_bo *pd = peer_vm->root.base.bo;
ret = amdgpu_sync_resv(NULL, ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
sync, pd->tbo.base.resv, AMDGPU_SYNC_NE_OWNER,
AMDGPU_FENCE_OWNER_KFD, false); AMDGPU_FENCE_OWNER_KFD);
if (ret) if (ret)
return ret; return ret;
} }
@@ -1044,6 +1080,8 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
list_del(&vm->vm_list_node); list_del(&vm->vm_list_node);
mutex_unlock(&process_info->lock); mutex_unlock(&process_info->lock);
vm->process_info = NULL;
/* Release per-process resources when last compute VM is destroyed */ /* Release per-process resources when last compute VM is destroyed */
if (!process_info->n_vms) { if (!process_info->n_vms) {
WARN_ON(!list_empty(&process_info->kfd_bo_list)); WARN_ON(!list_empty(&process_info->kfd_bo_list));


@@ -1461,6 +1461,20 @@ static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector
return MODE_OK; return MODE_OK;
} }
static int
amdgpu_connector_late_register(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
int r = 0;
if (amdgpu_connector->ddc_bus->has_aux) {
amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
r = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
}
return r;
}
static const struct drm_connector_helper_funcs amdgpu_connector_dp_helper_funcs = { static const struct drm_connector_helper_funcs amdgpu_connector_dp_helper_funcs = {
.get_modes = amdgpu_connector_dp_get_modes, .get_modes = amdgpu_connector_dp_get_modes,
.mode_valid = amdgpu_connector_dp_mode_valid, .mode_valid = amdgpu_connector_dp_mode_valid,
@@ -1475,6 +1489,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
.early_unregister = amdgpu_connector_unregister, .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy, .destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force, .force = amdgpu_connector_dvi_force,
.late_register = amdgpu_connector_late_register,
}; };
static const struct drm_connector_funcs amdgpu_connector_edp_funcs = { static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
@@ -1485,6 +1500,7 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.early_unregister = amdgpu_connector_unregister, .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy, .destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force, .force = amdgpu_connector_dvi_force,
.late_register = amdgpu_connector_late_register,
}; };
void void
@@ -1931,7 +1947,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->polled = DRM_CONNECTOR_POLL_HPD; connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order; connector->display_info.subpixel_order = subpixel_order;
drm_connector_register(connector);
if (has_aux) if (has_aux)
amdgpu_atombios_dp_aux_init(amdgpu_connector); amdgpu_atombios_dp_aux_init(amdgpu_connector);


@@ -654,16 +654,19 @@ out:
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{ {
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_bo_list_entry *e; struct amdgpu_bo_list_entry *e;
int r; int r;
list_for_each_entry(e, &p->validated, tv.head) { list_for_each_entry(e, &p->validated, tv.head) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
struct dma_resv *resv = bo->tbo.base.resv; struct dma_resv *resv = bo->tbo.base.resv;
enum amdgpu_sync_mode sync_mode;
r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp, sync_mode = amdgpu_bo_explicit_sync(bo) ?
amdgpu_bo_explicit_sync(bo)); AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
&fpriv->vm);
if (r) if (r)
return r; return r;
} }
@@ -1214,7 +1217,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job = p->job; job = p->job;
p->job = NULL; p->job = NULL;
r = drm_sched_job_init(&job->base, entity, p->filp); r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
if (r) if (r)
goto error_unlock; goto error_unlock;


@@ -31,6 +31,8 @@
#include <drm/drm_debugfs.h> #include <drm/drm_debugfs.h>
#include "amdgpu.h" #include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_dm_debugfs.h"
/** /**
* amdgpu_debugfs_add_files - Add simple debugfs entries * amdgpu_debugfs_add_files - Add simple debugfs entries
@@ -840,6 +842,55 @@ err:
return result; return result;
} }
/**
* amdgpu_debugfs_regs_gfxoff_write - Enable/disable GFXOFF
*
* @f: open file handle
* @buf: User buffer to write data from
* @size: Number of bytes to write
* @pos: Offset to seek to
*
* Write a 32-bit zero to disable or a 32-bit non-zero to enable
*/
static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
while (size) {
uint32_t value;
r = get_user(value, (uint32_t *)buf);
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
}
amdgpu_gfx_off_ctrl(adev, value ? true : false);
result += 4;
buf += 4;
*pos += 4;
size -= 4;
}
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
return result;
}
static const struct file_operations amdgpu_debugfs_regs_fops = { static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read, .read = amdgpu_debugfs_regs_read,
@@ -888,6 +939,11 @@ static const struct file_operations amdgpu_debugfs_gpr_fops = {
.llseek = default_llseek .llseek = default_llseek
}; };
static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
.owner = THIS_MODULE,
.write = amdgpu_debugfs_gfxoff_write,
};
static const struct file_operations *debugfs_regs[] = { static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops, &amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops, &amdgpu_debugfs_regs_didt_fops,
@@ -897,6 +953,7 @@ static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_sensors_fops, &amdgpu_debugfs_sensors_fops,
&amdgpu_debugfs_wave_fops, &amdgpu_debugfs_wave_fops,
&amdgpu_debugfs_gpr_fops, &amdgpu_debugfs_gpr_fops,
&amdgpu_debugfs_gfxoff_fops,
}; };
static const char *debugfs_regs_names[] = { static const char *debugfs_regs_names[] = {
@@ -908,6 +965,7 @@ static const char *debugfs_regs_names[] = {
"amdgpu_sensors", "amdgpu_sensors",
"amdgpu_wave", "amdgpu_wave",
"amdgpu_gpr", "amdgpu_gpr",
"amdgpu_gfxoff",
}; };
/** /**
@@ -1216,6 +1274,8 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
int amdgpu_debugfs_init(struct amdgpu_device *adev) int amdgpu_debugfs_init(struct amdgpu_device *adev)
{ {
int r, i;
adev->debugfs_preempt = adev->debugfs_preempt =
debugfs_create_file("amdgpu_preempt_ib", 0600, debugfs_create_file("amdgpu_preempt_ib", 0600,
adev->ddev->primary->debugfs_root, adev, adev->ddev->primary->debugfs_root, adev,
@@ -1225,12 +1285,73 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
return -EIO; return -EIO;
} }
/* Register debugfs entries for amdgpu_ttm */
r = amdgpu_ttm_debugfs_init(adev);
if (r) {
DRM_ERROR("Failed to init debugfs\n");
return r;
}
r = amdgpu_debugfs_pm_init(adev);
if (r) {
DRM_ERROR("Failed to register debugfs file for dpm!\n");
return r;
}
if (amdgpu_debugfs_sa_init(adev)) {
dev_err(adev->dev, "failed to register debugfs file for SA\n");
}
if (amdgpu_debugfs_fence_init(adev))
dev_err(adev->dev, "fence debugfs file creation failed\n");
r = amdgpu_debugfs_gem_init(adev);
if (r)
DRM_ERROR("registering gem debugfs failed (%d).\n", r);
r = amdgpu_debugfs_regs_init(adev);
if (r)
DRM_ERROR("registering register debugfs failed (%d).\n", r);
r = amdgpu_debugfs_firmware_init(adev);
if (r)
DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
#if defined(CONFIG_DRM_AMD_DC)
if (amdgpu_device_has_dc_support(adev)) {
if (dtn_debugfs_init(adev))
DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
}
#endif
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring)
continue;
if (amdgpu_debugfs_ring_init(adev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
}
}
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list, return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
ARRAY_SIZE(amdgpu_debugfs_list)); ARRAY_SIZE(amdgpu_debugfs_list));
} }
void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev) void amdgpu_debugfs_fini(struct amdgpu_device *adev)
{ {
int i;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring)
continue;
amdgpu_debugfs_ring_fini(ring);
}
amdgpu_ttm_debugfs_fini(adev);
debugfs_remove(adev->debugfs_preempt); debugfs_remove(adev->debugfs_preempt);
} }
@@ -1239,7 +1360,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
{ {
return 0; return 0;
} }
void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev) { } void amdgpu_debugfs_fini(struct amdgpu_device *adev) { }
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{ {
return 0; return 0;
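
For the amdgpu_gfxoff debugfs entry added above, a short userspace usage sketch (an illustration only, not part of the patch): the write handler takes 4-byte values, 0 to disable GFXOFF and any non-zero value to enable it. The /sys/kernel/debug/dri/0 path and a root-writable node are assumptions; only the file name and the write semantics come from the diff.

/* Hypothetical usage sketch for the amdgpu_gfxoff debugfs node added above.
 * Assumes debugfs is mounted at /sys/kernel/debug and the GPU is DRM minor 0.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t enable = 0;	/* 0 disables GFXOFF, any non-zero value enables it */
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_gfxoff", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The handler rejects writes that are not a multiple of 4 bytes. */
	if (write(fd, &enable, sizeof(enable)) != (ssize_t)sizeof(enable))
		perror("write");
	close(fd);
	return 0;
}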


@@ -34,7 +34,7 @@ struct amdgpu_debugfs {
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
int amdgpu_debugfs_init(struct amdgpu_device *adev); int amdgpu_debugfs_init(struct amdgpu_device *adev);
void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev); void amdgpu_debugfs_fini(struct amdgpu_device *adev);
int amdgpu_debugfs_add_files(struct amdgpu_device *adev, int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
const struct drm_info_list *files, const struct drm_info_list *files,
unsigned nfiles); unsigned nfiles);


@@ -183,20 +183,51 @@ bool amdgpu_device_supports_baco(struct drm_device *dev)
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
uint32_t *buf, size_t size, bool write) uint32_t *buf, size_t size, bool write)
{ {
uint64_t last;
unsigned long flags; unsigned long flags;
uint32_t hi = ~0;
uint64_t last;
#ifdef CONFIG_64BIT
last = min(pos + size, adev->gmc.visible_vram_size);
if (last > pos) {
void __iomem *addr = adev->mman.aper_base_kaddr + pos;
size_t count = last - pos;
if (write) {
memcpy_toio(addr, buf, count);
mb();
amdgpu_asic_flush_hdp(adev, NULL);
} else {
amdgpu_asic_invalidate_hdp(adev, NULL);
mb();
memcpy_fromio(buf, addr, count);
}
if (count == size)
return;
pos += count;
buf += count / 4;
size -= count;
}
#endif
last = size - 4;
for (last += pos; pos <= last; pos += 4) {
spin_lock_irqsave(&adev->mmio_idx_lock, flags); spin_lock_irqsave(&adev->mmio_idx_lock, flags);
for (last = pos + size; pos < last; pos += 4) {
uint32_t tmp = pos >> 31;
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000); WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31); if (tmp != hi) {
WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
hi = tmp;
}
if (write) if (write)
WREG32_NO_KIQ(mmMM_DATA, *buf++); WREG32_NO_KIQ(mmMM_DATA, *buf++);
else else
*buf++ = RREG32_NO_KIQ(mmMM_DATA); *buf++ = RREG32_NO_KIQ(mmMM_DATA);
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
} }
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
} }
/* /*
@@ -2344,6 +2375,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
} }
adev->ip_blocks[i].status.hw = false; adev->ip_blocks[i].status.hw = false;
/* handle putting the SMC in the appropriate state */ /* handle putting the SMC in the appropriate state */
if(!amdgpu_sriov_vf(adev)){
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state); r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
if (r) { if (r) {
@@ -2352,7 +2384,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
return r; return r;
} }
} }
}
adev->ip_blocks[i].status.hw = false; adev->ip_blocks[i].status.hw = false;
} }
@@ -2800,7 +2832,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
if (amdgpu_emu_mode == 1) if (amdgpu_emu_mode == 1)
adev->usec_timeout *= 2; adev->usec_timeout *= 10;
adev->gmc.gart_size = 512 * 1024 * 1024; adev->gmc.gart_size = 512 * 1024 * 1024;
adev->accel_working = false; adev->accel_working = false;
adev->num_rings = 0; adev->num_rings = 0;
@@ -3088,22 +3120,6 @@ fence_driver_init:
} else } else
adev->ucode_sysfs_en = true; adev->ucode_sysfs_en = true;
r = amdgpu_debugfs_gem_init(adev);
if (r)
DRM_ERROR("registering gem debugfs failed (%d).\n", r);
r = amdgpu_debugfs_regs_init(adev);
if (r)
DRM_ERROR("registering register debugfs failed (%d).\n", r);
r = amdgpu_debugfs_firmware_init(adev);
if (r)
DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
r = amdgpu_debugfs_init(adev);
if (r)
DRM_ERROR("Creating debugfs files failed (%d).\n", r);
if ((amdgpu_testing & 1)) { if ((amdgpu_testing & 1)) {
if (adev->accel_working) if (adev->accel_working)
amdgpu_test_moves(adev); amdgpu_test_moves(adev);
@@ -3225,7 +3241,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_ucode_sysfs_fini(adev); amdgpu_ucode_sysfs_fini(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS)) if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev); amdgpu_pmu_fini(adev);
amdgpu_debugfs_preempt_cleanup(adev);
if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
amdgpu_discovery_fini(adev); amdgpu_discovery_fini(adev);
} }
@@ -3309,7 +3324,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
} }
} }
amdgpu_amdkfd_suspend(adev); amdgpu_amdkfd_suspend(adev, !fbcon);
amdgpu_ras_suspend(adev); amdgpu_ras_suspend(adev);
@@ -3393,7 +3408,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
} }
} }
} }
r = amdgpu_amdkfd_resume(adev); r = amdgpu_amdkfd_resume(adev, !fbcon);
if (r) if (r)
return r; return r;


@@ -307,7 +307,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) { for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) { if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) {
DRM_INFO("set register base offset for %s\n", DRM_DEBUG("set register base offset for %s\n",
hw_id_names[le16_to_cpu(ip->hw_id)]); hw_id_names[le16_to_cpu(ip->hw_id)]);
adev->reg_offset[hw_ip][ip->number_instance] = adev->reg_offset[hw_ip][ip->number_instance] =
ip->base_address; ip->base_address;


@@ -1171,3 +1171,20 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
return ret; return ret;
} }
int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
uint32_t cstate)
{
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
struct smu_context *smu = &adev->smu;
if (is_support_sw_smu(adev))
ret = smu_set_df_cstate(smu, cstate);
else if (pp_funcs &&
pp_funcs->set_df_cstate)
ret = pp_funcs->set_df_cstate(pp_handle, cstate);
return ret;
}


@@ -533,4 +533,7 @@ int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev); int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
uint32_t cstate);
#endif #endif


@@ -1021,6 +1021,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent) const struct pci_device_id *ent)
{ {
struct drm_device *dev; struct drm_device *dev;
struct amdgpu_device *adev;
unsigned long flags = ent->driver_data; unsigned long flags = ent->driver_data;
int ret, retry = 0; int ret, retry = 0;
bool supports_atomic = false; bool supports_atomic = false;
@@ -1090,6 +1091,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev); pci_set_drvdata(pdev, dev);
amdgpu_driver_load_kms(dev, ent->driver_data);
retry_init: retry_init:
ret = drm_dev_register(dev, ent->driver_data); ret = drm_dev_register(dev, ent->driver_data);
if (ret == -EAGAIN && ++retry <= 3) { if (ret == -EAGAIN && ++retry <= 3) {
@@ -1100,6 +1103,11 @@ retry_init:
} else if (ret) } else if (ret)
goto err_pci; goto err_pci;
adev = dev->dev_private;
ret = amdgpu_debugfs_init(adev);
if (ret)
DRM_ERROR("Creating debugfs files failed (%d).\n", ret);
return 0; return 0;
err_pci: err_pci:
@@ -1113,6 +1121,7 @@ static void
amdgpu_pci_remove(struct pci_dev *pdev) amdgpu_pci_remove(struct pci_dev *pdev)
{ {
struct drm_device *dev = pci_get_drvdata(pdev); struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = dev->dev_private;
#ifdef MODULE #ifdef MODULE
if (THIS_MODULE->state != MODULE_STATE_GOING) if (THIS_MODULE->state != MODULE_STATE_GOING)
@@ -1120,6 +1129,8 @@ amdgpu_pci_remove(struct pci_dev *pdev)
DRM_ERROR("Hotplug removal is not supported\n"); DRM_ERROR("Hotplug removal is not supported\n");
drm_dev_unplug(dev); drm_dev_unplug(dev);
drm_dev_put(dev); drm_dev_put(dev);
amdgpu_debugfs_fini(adev);
amdgpu_driver_unload_kms(dev);
pci_disable_device(pdev); pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL); pci_set_drvdata(pdev, NULL);
} }
@@ -1220,11 +1231,15 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
} }
} }
adev->in_runpm = true;
if (amdgpu_device_supports_boco(drm_dev)) if (amdgpu_device_supports_boco(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev); drm_kms_helper_poll_disable(drm_dev);
ret = amdgpu_device_suspend(drm_dev, false); ret = amdgpu_device_suspend(drm_dev, false);
if (ret)
return ret;
if (amdgpu_device_supports_boco(drm_dev)) { if (amdgpu_device_supports_boco(drm_dev)) {
/* Only need to handle PCI state in the driver for ATPX /* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3. * PCI core handles it for _PR3.
@@ -1278,6 +1293,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
drm_kms_helper_poll_enable(drm_dev); drm_kms_helper_poll_enable(drm_dev);
if (amdgpu_device_supports_boco(drm_dev)) if (amdgpu_device_supports_boco(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
adev->in_runpm = false;
return 0; return 0;
} }
@@ -1379,15 +1395,13 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
static struct drm_driver kms_driver = { static struct drm_driver kms_driver = {
.driver_features = .driver_features =
DRIVER_USE_AGP | DRIVER_ATOMIC | DRIVER_ATOMIC |
DRIVER_GEM | DRIVER_GEM |
DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
DRIVER_SYNCOBJ_TIMELINE, DRIVER_SYNCOBJ_TIMELINE,
.load = amdgpu_driver_load_kms,
.open = amdgpu_driver_open_kms, .open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms, .postclose = amdgpu_driver_postclose_kms,
.lastclose = amdgpu_driver_lastclose_kms, .lastclose = amdgpu_driver_lastclose_kms,
.unload = amdgpu_driver_unload_kms,
.irq_handler = amdgpu_irq_handler, .irq_handler = amdgpu_irq_handler,
.ioctls = amdgpu_ioctls_kms, .ioctls = amdgpu_ioctls_kms,
.gem_free_object_unlocked = amdgpu_gem_object_free, .gem_free_object_unlocked = amdgpu_gem_object_free,


@@ -503,9 +503,6 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
*/ */
int amdgpu_fence_driver_init(struct amdgpu_device *adev) int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{ {
if (amdgpu_debugfs_fence_init(adev))
dev_err(adev->dev, "fence debugfs file creation failed\n");
return 0; return 0;
} }


@@ -477,7 +477,7 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i], kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
RESET_QUEUES, 0, 0); RESET_QUEUES, 0, 0);
return amdgpu_ring_test_ring(kiq_ring); return amdgpu_ring_test_helper(kiq_ring);
} }
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)


@@ -195,6 +195,7 @@ struct amdgpu_gmc {
uint32_t srbm_soft_reset; uint32_t srbm_soft_reset;
bool prt_warning; bool prt_warning;
uint64_t stolen_size; uint64_t stolen_size;
uint32_t sdpif_register;
/* apertures */ /* apertures */
u64 shared_aperture_start; u64 shared_aperture_start;
u64 shared_aperture_end; u64 shared_aperture_end;


@@ -48,7 +48,6 @@
* produce command buffers which are send to the kernel and * produce command buffers which are send to the kernel and
* put in IBs for execution by the requested ring. * put in IBs for execution by the requested ring.
*/ */
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
/** /**
* amdgpu_ib_get - request an IB (Indirect Buffer) * amdgpu_ib_get - request an IB (Indirect Buffer)
@@ -295,9 +294,7 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
} }
adev->ib_pool_ready = true; adev->ib_pool_ready = true;
if (amdgpu_debugfs_sa_init(adev)) {
dev_err(adev->dev, "failed to register debugfs file for SA\n");
}
return 0; return 0;
} }
@@ -421,7 +418,7 @@ static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
#endif #endif
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev) int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{ {
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1); return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);


@@ -170,10 +170,16 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
} }
if (amdgpu_device_supports_boco(dev) && if (amdgpu_device_supports_boco(dev) &&
(amdgpu_runtime_pm != 0)) /* enable runpm by default */ (amdgpu_runtime_pm != 0)) /* enable runpm by default for boco */
adev->runpm = true; adev->runpm = true;
else if (amdgpu_device_supports_baco(dev) && else if (amdgpu_device_supports_baco(dev) &&
(amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 */ (amdgpu_runtime_pm != 0) &&
(adev->asic_type >= CHIP_TOPAZ) &&
(adev->asic_type != CHIP_VEGA20) &&
(adev->asic_type != CHIP_ARCTURUS)) /* enable runpm on VI+ */
adev->runpm = true;
else if (amdgpu_device_supports_baco(dev) &&
(amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 on CI */
adev->runpm = true; adev->runpm = true;
/* Call ACPI methods: require modeset init /* Call ACPI methods: require modeset init


@@ -1318,6 +1318,11 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (abo->kfd_bo) if (abo->kfd_bo)
amdgpu_amdkfd_unreserve_memory_limit(abo); amdgpu_amdkfd_unreserve_memory_limit(abo);
/* We only remove the fence if the resv has individualized. */
WARN_ON_ONCE(bo->base.resv != &bo->base._resv);
if (bo->base.resv == &bo->base._resv)
amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node || if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
return; return;
@@ -1414,27 +1419,49 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
} }
/** /**
* amdgpu_sync_wait_resv - Wait for BO reservation fences * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
* *
* @bo: buffer object * @adev: amdgpu device pointer
* @resv: reservation object to sync to
* @sync_mode: synchronization mode
* @owner: fence owner * @owner: fence owner
* @intr: Whether the wait is interruptible * @intr: Whether the wait is interruptible
* *
* Extract the fences from the reservation object and waits for them to finish.
*
* Returns:
* 0 on success, errno otherwise.
*/
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
enum amdgpu_sync_mode sync_mode, void *owner,
bool intr)
{
struct amdgpu_sync sync;
int r;
amdgpu_sync_create(&sync);
amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
r = amdgpu_sync_wait(&sync, intr);
amdgpu_sync_free(&sync);
return r;
}
/**
* amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
* @bo: buffer object to wait for
* @owner: fence owner
* @intr: Whether the wait is interruptible
*
* Wrapper to wait for fences in a BO.
* Returns: * Returns:
* 0 on success, errno otherwise. * 0 on success, errno otherwise.
*/ */
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr) int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{ {
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_sync sync;
int r;
amdgpu_sync_create(&sync); return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false); AMDGPU_SYNC_NE_OWNER, owner, intr);
r = amdgpu_sync_wait(&sync, intr);
amdgpu_sync_free(&sync);
return r;
} }
/** /**


@@ -277,6 +277,9 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared); bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
enum amdgpu_sync_mode sync_mode, void *owner,
bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr); int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo); int amdgpu_bo_validate(struct amdgpu_bo *bo);
@@ -316,6 +319,7 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev,
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m); struct seq_file *m);
#endif #endif
int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
bool amdgpu_bo_support_uswc(u64 bo_flags); bool amdgpu_bo_support_uswc(u64 bo_flags);


@@ -41,8 +41,6 @@
#include "hwmgr.h" #include "hwmgr.h"
#define WIDTH_4K 3840 #define WIDTH_4K 3840
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
static const struct cg_flag_name clocks[] = { static const struct cg_flag_name clocks[] = {
{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"}, {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"}, {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
@@ -3398,11 +3396,6 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
DRM_ERROR("failed to create device file unique_id\n"); DRM_ERROR("failed to create device file unique_id\n");
return ret; return ret;
} }
ret = amdgpu_debugfs_pm_init(adev);
if (ret) {
DRM_ERROR("Failed to register debugfs file for dpm!\n");
return ret;
}
if ((adev->asic_type >= CHIP_VEGA10) && if ((adev->asic_type >= CHIP_VEGA10) &&
!(adev->flags & AMD_IS_APU)) { !(adev->flags & AMD_IS_APU)) {
@@ -3669,7 +3662,7 @@ static const struct drm_info_list amdgpu_pm_info_list[] = {
}; };
#endif #endif
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev) int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{ {
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list)); return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));


@@ -43,4 +43,6 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable); void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable); void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
#endif #endif


@@ -38,6 +38,39 @@
static void psp_set_funcs(struct amdgpu_device *adev); static void psp_set_funcs(struct amdgpu_device *adev);
/*
* Due to DF Cstate management centralized to PMFW, the firmware
* loading sequence will be updated as below:
* - Load KDB
* - Load SYS_DRV
* - Load tOS
* - Load PMFW
* - Setup TMR
* - Load other non-psp fw
* - Load ASD
* - Load XGMI/RAS/HDCP/DTM TA if any
*
* This new sequence is required for
* - Arcturus
* - Navi12 and onwards
*/
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
psp->pmfw_centralized_cstate_management = false;
if (amdgpu_sriov_vf(adev))
return;
if (adev->flags & AMD_IS_APU)
return;
if ((adev->asic_type == CHIP_ARCTURUS) ||
(adev->asic_type >= CHIP_NAVI12))
psp->pmfw_centralized_cstate_management = true;
}
static int psp_early_init(void *handle) static int psp_early_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -75,6 +108,8 @@ static int psp_early_init(void *handle)
psp->adev = adev; psp->adev = adev;
psp_check_pmfw_centralized_cstate_management(psp);
return 0; return 0;
} }
@@ -558,7 +593,7 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id); return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
} }
static int psp_xgmi_terminate(struct psp_context *psp) int psp_xgmi_terminate(struct psp_context *psp)
{ {
int ret; int ret;
@@ -579,7 +614,7 @@ static int psp_xgmi_terminate(struct psp_context *psp)
return 0; return 0;
} }
static int psp_xgmi_initialize(struct psp_context *psp) int psp_xgmi_initialize(struct psp_context *psp)
{ {
struct ta_xgmi_shared_memory *xgmi_cmd; struct ta_xgmi_shared_memory *xgmi_cmd;
int ret; int ret;
@@ -1013,6 +1048,30 @@ static int psp_dtm_initialize(struct psp_context *psp)
return 0; return 0;
} }
static int psp_dtm_unload(struct psp_context *psp)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
/*
* TODO: bypass the unloading in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
kfree(cmd);
return ret;
}
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{ {
/* /*
@@ -1037,7 +1096,7 @@ static int psp_dtm_terminate(struct psp_context *psp)
if (!psp->dtm_context.dtm_initialized) if (!psp->dtm_context.dtm_initialized)
return 0; return 0;
ret = psp_hdcp_unload(psp); ret = psp_dtm_unload(psp);
if (ret) if (ret)
return ret; return ret;
@@ -1057,7 +1116,7 @@ static int psp_hw_start(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev; struct amdgpu_device *adev = psp->adev;
int ret; int ret;
if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) { if (!amdgpu_sriov_vf(adev)) {
if (psp->kdb_bin_size && if (psp->kdb_bin_size &&
(psp->funcs->bootloader_load_kdb != NULL)) { (psp->funcs->bootloader_load_kdb != NULL)) {
ret = psp_bootloader_load_kdb(psp); ret = psp_bootloader_load_kdb(psp);
@@ -1092,11 +1151,18 @@ static int psp_hw_start(struct psp_context *psp)
return ret; return ret;
} }
/*
* For those ASICs with DF Cstate management centralized
* to PMFW, TMR setup should be performed after PMFW
* loaded and before other non-psp firmware loaded.
*/
if (!psp->pmfw_centralized_cstate_management) {
ret = psp_tmr_load(psp); ret = psp_tmr_load(psp);
if (ret) { if (ret) {
DRM_ERROR("PSP load tmr failed!\n"); DRM_ERROR("PSP load tmr failed!\n");
return ret; return ret;
} }
}
return 0; return 0;
} }
@@ -1292,9 +1358,10 @@ static int psp_np_fw_load(struct psp_context *psp)
struct amdgpu_firmware_info *ucode; struct amdgpu_firmware_info *ucode;
struct amdgpu_device* adev = psp->adev; struct amdgpu_device* adev = psp->adev;
if (psp->autoload_supported) { if (psp->autoload_supported ||
psp->pmfw_centralized_cstate_management) {
ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
if (!ucode->fw) if (!ucode->fw || amdgpu_sriov_vf(adev))
goto out; goto out;
ret = psp_execute_np_fw_load(psp, ucode); ret = psp_execute_np_fw_load(psp, ucode);
@@ -1302,6 +1369,14 @@ static int psp_np_fw_load(struct psp_context *psp)
return ret; return ret;
} }
if (psp->pmfw_centralized_cstate_management) {
ret = psp_tmr_load(psp);
if (ret) {
DRM_ERROR("PSP load tmr failed!\n");
return ret;
}
}
out: out:
for (i = 0; i < adev->firmware.max_ucodes; i++) { for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i]; ucode = &adev->firmware.ucode[i];
@@ -1309,7 +1384,9 @@ out:
continue; continue;
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
(psp_smu_reload_quirk(psp) || psp->autoload_supported)) (psp_smu_reload_quirk(psp) ||
psp->autoload_supported ||
psp->pmfw_centralized_cstate_management))
continue; continue;
if (amdgpu_sriov_vf(adev) && if (amdgpu_sriov_vf(adev) &&
@@ -1420,16 +1497,6 @@ skip_memalloc:
return ret; return ret;
} }
if (adev->gmc.xgmi.num_physical_nodes > 1) {
ret = psp_xgmi_initialize(psp);
/* Warning the XGMI seesion initialize failure
* Instead of stop driver initialization
*/
if (ret)
dev_err(psp->adev->dev,
"XGMI: Failed to initialize XGMI session\n");
}
if (psp->adev->psp.ta_fw) { if (psp->adev->psp.ta_fw) {
ret = psp_ras_initialize(psp); ret = psp_ras_initialize(psp);
if (ret) if (ret)
@@ -1494,10 +1561,6 @@ static int psp_hw_fini(void *handle)
void *tmr_buf; void *tmr_buf;
void **pptr; void **pptr;
if (adev->gmc.xgmi.num_physical_nodes > 1 &&
psp->xgmi_context.initialized == 1)
psp_xgmi_terminate(psp);
if (psp->adev->psp.ta_fw) { if (psp->adev->psp.ta_fw) {
psp_ras_terminate(psp); psp_ras_terminate(psp);
psp_dtm_terminate(psp); psp_dtm_terminate(psp);


@@ -264,6 +264,8 @@ struct psp_context
atomic_t fence_value; atomic_t fence_value;
/* flag to mark whether gfx fw autoload is supported or not */ /* flag to mark whether gfx fw autoload is supported or not */
bool autoload_supported; bool autoload_supported;
/* flag to mark whether df cstate management centralized to PMFW */
bool pmfw_centralized_cstate_management;
/* xgmi ta firmware and buffer */ /* xgmi ta firmware and buffer */
const struct firmware *ta_fw; const struct firmware *ta_fw;
@@ -362,6 +364,8 @@ int psp_gpu_reset(struct amdgpu_device *adev);
int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx, int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
uint64_t cmd_gpu_addr, int cmd_size); uint64_t cmd_gpu_addr, int cmd_size);
int psp_xgmi_initialize(struct psp_context *psp);
int psp_xgmi_terminate(struct psp_context *psp);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);


@@ -31,6 +31,7 @@
#include "amdgpu.h" #include "amdgpu.h"
#include "amdgpu_ras.h" #include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h" #include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
const char *ras_error_string[] = { const char *ras_error_string[] = {
@@ -742,20 +743,6 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
return 0; return 0;
} }
uint64_t get_xgmi_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr)
{
uint32_t df_inst_id;
if ((!adev->df.funcs) ||
(!adev->df.funcs->get_df_inst_id) ||
(!adev->df.funcs->get_dram_base_addr))
return addr;
df_inst_id = adev->df.funcs->get_df_inst_id(adev);
return addr + adev->df.funcs->get_dram_base_addr(adev, df_inst_id);
}
/* wrapper of psp_ras_trigger_error */ /* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev, int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info) struct ras_inject_if *info)
@@ -775,7 +762,8 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
/* Calculate XGMI relative offset */ /* Calculate XGMI relative offset */
if (adev->gmc.xgmi.num_physical_nodes > 1) { if (adev->gmc.xgmi.num_physical_nodes > 1) {
block_info.address = get_xgmi_relative_phy_addr(adev, block_info.address =
amdgpu_xgmi_get_relative_phy_addr(adev,
block_info.address); block_info.address);
} }
@@ -1319,6 +1307,33 @@ static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
} }
/* ih end */ /* ih end */
/* traversal all IPs except NBIO to query error counter */
static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
if (!con)
return;
list_for_each_entry(obj, &con->head, node) {
struct ras_query_if info = {
.head = obj->head,
};
/*
* PCIE_BIF IP has one different isr by ras controller
* interrupt, the specific ras counter query will be
* done in that isr. So skip such block from common
* sync flood interrupt isr calling.
*/
if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
continue;
amdgpu_ras_error_query(adev, &info);
}
}
/* recovery begin */ /* recovery begin */
/* return 0 on success. /* return 0 on success.
@@ -1373,6 +1388,12 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_ras *ras = struct amdgpu_ras *ras =
container_of(work, struct amdgpu_ras, recovery_work); container_of(work, struct amdgpu_ras, recovery_work);
/*
* Query and print non zero error counter per IP block for
* awareness before recovering GPU.
*/
amdgpu_ras_log_on_err_counter(ras->adev);
if (amdgpu_device_should_recover_gpu(ras->adev)) if (amdgpu_device_should_recover_gpu(ras->adev))
amdgpu_device_gpu_recover(ras->adev, 0); amdgpu_device_gpu_recover(ras->adev, 0);
atomic_set(&ras->in_recovery, 0); atomic_set(&ras->in_recovery, 0);


@@ -26,9 +26,11 @@
#include "amdgpu_ras.h" #include "amdgpu_ras.h"
#include <linux/bits.h> #include <linux/bits.h>
#include "smu_v11_0_i2c.h" #include "smu_v11_0_i2c.h"
#include "atom.h"
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0 #define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342 0xA0
/* /*
* The 2 macros bellow represent the actual size in bytes that * The 2 macros bellow represent the actual size in bytes that
@@ -55,6 +57,45 @@
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev #define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev
static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
uint16_t *i2c_addr)
{
struct atom_context *atom_ctx = adev->mode_info.atom_context;
if (!i2c_addr || !atom_ctx)
return false;
if (strnstr(atom_ctx->vbios_version,
"D342",
sizeof(atom_ctx->vbios_version)))
*i2c_addr = EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342;
else
*i2c_addr = EEPROM_I2C_TARGET_ADDR_ARCTURUS;
return true;
}
static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
uint16_t *i2c_addr)
{
if (!i2c_addr)
return false;
switch (adev->asic_type) {
case CHIP_VEGA20:
*i2c_addr = EEPROM_I2C_TARGET_ADDR_VEGA20;
break;
case CHIP_ARCTURUS:
return __get_eeprom_i2c_addr_arct(adev, i2c_addr);
default:
return false;
}
return true;
}
static void __encode_table_header_to_buff(struct amdgpu_ras_eeprom_table_header *hdr, static void __encode_table_header_to_buff(struct amdgpu_ras_eeprom_table_header *hdr,
unsigned char *buff) unsigned char *buff)
{ {
@@ -103,8 +144,6 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
return ret; return ret;
} }
static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control) static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
{ {
int i; int i;
@@ -212,16 +251,17 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
.buf = buff, .buf = buff,
}; };
if (!__get_eeprom_i2c_addr(adev, &control->i2c_address))
return -EINVAL;
mutex_init(&control->tbl_mutex); mutex_init(&control->tbl_mutex);
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA20: case CHIP_VEGA20:
control->i2c_address = EEPROM_I2C_TARGET_ADDR_VEGA20;
ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor); ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
break; break;
case CHIP_ARCTURUS: case CHIP_ARCTURUS:
control->i2c_address = EEPROM_I2C_TARGET_ADDR_ARCTURUS;
ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor); ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor);
break; break;


@@ -48,9 +48,6 @@
* wptr. The GPU then starts fetching commands and executes * wptr. The GPU then starts fetching commands and executes
* them until the pointers are equal again. * them until the pointers are equal again.
*/ */
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
/** /**
* amdgpu_ring_alloc - allocate space on the ring buffer * amdgpu_ring_alloc - allocate space on the ring buffer
@@ -334,10 +331,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i) for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
atomic_set(&ring->num_jobs[i], 0); atomic_set(&ring->num_jobs[i], 0);
if (amdgpu_debugfs_ring_init(adev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
}
return 0; return 0;
} }
@@ -351,12 +344,13 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
*/ */
void amdgpu_ring_fini(struct amdgpu_ring *ring) void amdgpu_ring_fini(struct amdgpu_ring *ring)
{ {
ring->sched.ready = false;
/* Not to finish a ring which is not initialized */ /* Not to finish a ring which is not initialized */
if (!(ring->adev) || !(ring->adev->rings[ring->idx])) if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
return; return;
ring->sched.ready = false;
amdgpu_device_wb_free(ring->adev, ring->rptr_offs); amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
amdgpu_device_wb_free(ring->adev, ring->wptr_offs); amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
@@ -367,8 +361,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
&ring->gpu_addr, &ring->gpu_addr,
(void **)&ring->ring); (void **)&ring->ring);
amdgpu_debugfs_ring_fini(ring);
dma_fence_put(ring->vmid_wait); dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL; ring->vmid_wait = NULL;
ring->me = 0; ring->me = 0;
@@ -485,7 +477,7 @@ static const struct file_operations amdgpu_debugfs_ring_fops = {
#endif #endif
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring) struct amdgpu_ring *ring)
{ {
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
@@ -507,7 +499,7 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
return 0; return 0;
} }
static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring) void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
{ {
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
debugfs_remove(ring->ent); debugfs_remove(ring->ent);


@@ -328,4 +328,8 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
int amdgpu_ring_test_helper(struct amdgpu_ring *ring); int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
#endif #endif


@@ -70,7 +70,8 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
uint32_t index = 0; uint32_t index = 0;
int r; int r;
if (vmid == 0 || !amdgpu_mcbp) /* don't enable OS preemption on SDMA under SRIOV */
if (amdgpu_sriov_vf(adev) || vmid == 0 || !amdgpu_mcbp)
return 0; return 0;
r = amdgpu_sdma_get_index_from_ring(ring, &index); r = amdgpu_sdma_get_index_from_ring(ring, &index);


@@ -202,18 +202,17 @@ int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
* *
* @sync: sync object to add fences from reservation object to * @sync: sync object to add fences from reservation object to
* @resv: reservation object with embedded fence * @resv: reservation object with embedded fence
* @explicit_sync: true if we should only sync to the exclusive fence * @mode: how owner affects which fences we sync to
* @owner: owner of the planned job submission
* *
* Sync to the fence * Sync to the fence
*/ */
int amdgpu_sync_resv(struct amdgpu_device *adev, int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct amdgpu_sync *sync, struct dma_resv *resv, enum amdgpu_sync_mode mode,
struct dma_resv *resv, void *owner)
void *owner, bool explicit_sync)
{ {
struct dma_resv_list *flist; struct dma_resv_list *flist;
struct dma_fence *f; struct dma_fence *f;
void *fence_owner;
unsigned i; unsigned i;
int r = 0; int r = 0;
@@ -229,29 +228,45 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
return r; return r;
for (i = 0; i < flist->shared_count; ++i) { for (i = 0; i < flist->shared_count; ++i) {
void *fence_owner;
f = rcu_dereference_protected(flist->shared[i], f = rcu_dereference_protected(flist->shared[i],
dma_resv_held(resv)); dma_resv_held(resv));
fence_owner = amdgpu_sync_get_owner(f);
/* Always sync to moves, no matter what */
if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) {
r = amdgpu_sync_fence(sync, f, false);
if (r)
break;
}
/* We only want to trigger KFD eviction fences on /* We only want to trigger KFD eviction fences on
* evict or move jobs. Skip KFD fences otherwise. * evict or move jobs. Skip KFD fences otherwise.
*/ */
fence_owner = amdgpu_sync_get_owner(f);
if (fence_owner == AMDGPU_FENCE_OWNER_KFD && if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
owner != AMDGPU_FENCE_OWNER_UNDEFINED) owner != AMDGPU_FENCE_OWNER_UNDEFINED)
continue; continue;
if (amdgpu_sync_same_dev(adev, f)) { /* Ignore fences depending on the sync mode */
/* VM updates only sync with moves but not with user switch (mode) {
* command submissions or KFD evictions fences case AMDGPU_SYNC_ALWAYS:
*/ break;
if (owner == AMDGPU_FENCE_OWNER_VM &&
fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED)
continue;
/* Ignore fence from the same owner and explicit one as case AMDGPU_SYNC_NE_OWNER:
* long as it isn't undefined. if (amdgpu_sync_same_dev(adev, f) &&
*/ fence_owner == owner)
if (owner != AMDGPU_FENCE_OWNER_UNDEFINED && continue;
(fence_owner == owner || explicit_sync)) break;
case AMDGPU_SYNC_EQ_OWNER:
if (amdgpu_sync_same_dev(adev, f) &&
fence_owner != owner)
continue;
break;
case AMDGPU_SYNC_EXPLICIT:
continue; continue;
} }
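The switch above is the core of the new interface: instead of a single explicit_sync flag, callers pick one of four amdgpu_sync_mode values and the shared fences of the reservation object are filtered accordingly. A minimal standalone sketch of that filter, with plain pointers standing in for fence owners (not the driver's code, just the decision table):

#include <stdbool.h>
#include <stdio.h>

enum sync_mode { SYNC_ALWAYS, SYNC_NE_OWNER, SYNC_EQ_OWNER, SYNC_EXPLICIT };

/* Returns true if a shared fence must be waited on; moves (undefined owner)
 * are always synced to before this filter is even consulted. */
static bool must_sync(enum sync_mode mode, bool same_dev,
		      const void *fence_owner, const void *owner)
{
	switch (mode) {
	case SYNC_ALWAYS:
		return true;				/* wait on everything */
	case SYNC_NE_OWNER:
		return !(same_dev && fence_owner == owner); /* skip our own jobs */
	case SYNC_EQ_OWNER:
		return !(same_dev && fence_owner != owner); /* only wait on our own jobs */
	case SYNC_EXPLICIT:
		return false;		/* rely on explicit dependencies only */
	}
	return true;
}

int main(void)
{
	int vm_a, vm_b;

	printf("%d\n", must_sync(SYNC_NE_OWNER, true, &vm_a, &vm_b)); /* 1 */
	printf("%d\n", must_sync(SYNC_EQ_OWNER, true, &vm_a, &vm_b)); /* 0 */
	return 0;
}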


@@ -31,6 +31,13 @@ struct dma_resv;
struct amdgpu_device; struct amdgpu_device;
struct amdgpu_ring; struct amdgpu_ring;
enum amdgpu_sync_mode {
AMDGPU_SYNC_ALWAYS,
AMDGPU_SYNC_NE_OWNER,
AMDGPU_SYNC_EQ_OWNER,
AMDGPU_SYNC_EXPLICIT
};
/* /*
* Container for fences used to sync command submissions. * Container for fences used to sync command submissions.
*/ */
@@ -43,11 +50,9 @@ void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f, int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
bool explicit); bool explicit);
int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence); int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence);
int amdgpu_sync_resv(struct amdgpu_device *adev, int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct amdgpu_sync *sync, struct dma_resv *resv, enum amdgpu_sync_mode mode,
struct dma_resv *resv, void *owner);
void *owner,
bool explicit_sync);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring); struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync,


@@ -60,15 +60,14 @@
#include "amdgpu_ras.h" #include "amdgpu_ras.h"
#include "bif/bif_4_1_d.h" #include "bif/bif_4_1_d.h"
#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
static int amdgpu_map_buffer(struct ttm_buffer_object *bo, static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem, unsigned num_pages, struct ttm_mem_reg *mem, unsigned num_pages,
uint64_t offset, unsigned window, uint64_t offset, unsigned window,
struct amdgpu_ring *ring, struct amdgpu_ring *ring,
uint64_t *addr); uint64_t *addr);
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
/** /**
* amdgpu_init_mem_type - Initialize a memory manager for a specific type of * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
* memory request. * memory request.
@@ -1591,7 +1590,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
while (len && pos < adev->gmc.mc_vram_size) { while (len && pos < adev->gmc.mc_vram_size) {
uint64_t aligned_pos = pos & ~(uint64_t)3; uint64_t aligned_pos = pos & ~(uint64_t)3;
uint32_t bytes = 4 - (pos & 3); uint64_t bytes = 4 - (pos & 3);
uint32_t shift = (pos & 3) * 8; uint32_t shift = (pos & 3) * 8;
uint32_t mask = 0xffffffff << shift; uint32_t mask = 0xffffffff << shift;
@@ -1600,6 +1599,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
bytes = len; bytes = len;
} }
if (mask != 0xffffffff) {
spin_lock_irqsave(&adev->mmio_idx_lock, flags); spin_lock_irqsave(&adev->mmio_idx_lock, flags);
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000); WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31); WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
@@ -1615,6 +1615,13 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
value = (value & mask) >> shift; value = (value & mask) >> shift;
memcpy(buf, &value, bytes); memcpy(buf, &value, bytes);
} }
} else {
bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
bytes, write);
}
ret += bytes; ret += bytes;
buf = (uint8_t *)buf + bytes; buf = (uint8_t *)buf + bytes;
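In the rewritten access path above, only the unaligned head and tail of a request still go through the MM_INDEX/MM_DATA window; everything in between is handed to amdgpu_device_vram_access() as one aligned span. A small sketch of that split (pure arithmetic, the byte counts are an illustrative assumption):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pos = 6, len = 27;

	uint64_t head = (4 - (pos & 3)) & 3;	/* bytes until 4-byte alignment */
	if (head > len)
		head = len;
	uint64_t body = (len - head) & ~0x3ull;	/* aligned span for the bulk path */
	uint64_t tail = len - head - body;	/* leftover partial dword */

	printf("head %llu, body %llu, tail %llu bytes\n",
	       (unsigned long long)head, (unsigned long long)body,
	       (unsigned long long)tail);	/* head 2, body 24, tail 1 */
	return 0;
}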
@@ -1905,12 +1912,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r; return r;
} }
/* Register debugfs entries for amdgpu_ttm */
r = amdgpu_ttm_debugfs_init(adev);
if (r) {
DRM_ERROR("Failed to init debugfs\n");
return r;
}
return 0; return 0;
} }
@@ -1932,7 +1933,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
if (!adev->mman.initialized) if (!adev->mman.initialized)
return; return;
amdgpu_ttm_debugfs_fini(adev);
amdgpu_ttm_training_reserve_vram_fini(adev); amdgpu_ttm_training_reserve_vram_fini(adev);
/* return the IP Discovery TMR memory back to VRAM */ /* return the IP Discovery TMR memory back to VRAM */
amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL); amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
@@ -2107,8 +2107,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
} }
if (resv) { if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv, r = amdgpu_sync_resv(adev, &job->sync, resv,
AMDGPU_FENCE_OWNER_UNDEFINED, AMDGPU_SYNC_ALWAYS,
false); AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) { if (r) {
DRM_ERROR("sync failed (%d).\n", r); DRM_ERROR("sync failed (%d).\n", r);
goto error_free; goto error_free;
@@ -2192,7 +2192,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
if (resv) { if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv, r = amdgpu_sync_resv(adev, &job->sync, resv,
AMDGPU_FENCE_OWNER_UNDEFINED, false); AMDGPU_SYNC_ALWAYS,
AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) { if (r) {
DRM_ERROR("sync failed (%d).\n", r); DRM_ERROR("sync failed (%d).\n", r);
goto error_free; goto error_free;
@@ -2273,7 +2274,6 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
{ {
struct amdgpu_device *adev = file_inode(f)->i_private; struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0; ssize_t result = 0;
int r;
if (size & 0x3 || *pos & 0x3) if (size & 0x3 || *pos & 0x3)
return -EINVAL; return -EINVAL;
@@ -2281,27 +2281,19 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
if (*pos >= adev->gmc.mc_vram_size) if (*pos >= adev->gmc.mc_vram_size)
return -ENXIO; return -ENXIO;
size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
while (size) { while (size) {
unsigned long flags; size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
uint32_t value; uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
if (*pos >= adev->gmc.mc_vram_size) amdgpu_device_vram_access(adev, *pos, value, bytes, false);
return result; if (copy_to_user(buf, value, bytes))
return -EFAULT;
spin_lock_irqsave(&adev->mmio_idx_lock, flags); result += bytes;
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); buf += bytes;
WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31); *pos += bytes;
value = RREG32_NO_KIQ(mmMM_DATA); size -= bytes;
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
r = put_user(value, (uint32_t *)buf);
if (r)
return r;
result += 4;
buf += 4;
*pos += 4;
size -= 4;
} }
return result; return result;
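The debugfs read above now pulls VRAM through a fixed 128-dword stack buffer instead of issuing one register access per word. A hedged sketch of the same chunking loop, with memcpy standing in for amdgpu_device_vram_access() and copy_to_user():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_DW_READ 128	/* mirrors AMDGPU_TTM_VRAM_MAX_DW_READ */

static size_t chunked_read(uint8_t *dst, const uint8_t *src, size_t size)
{
	uint32_t value[MAX_DW_READ];
	size_t result = 0;

	while (size) {
		size_t bytes = size < sizeof(value) ? size : sizeof(value);

		memcpy(value, src + result, bytes);	/* stand-in for the VRAM read */
		memcpy(dst + result, value, bytes);	/* stand-in for copy_to_user() */

		result += bytes;
		size -= bytes;
	}
	return result;
}

int main(void)
{
	uint8_t src[1000], dst[1000];

	memset(src, 0xab, sizeof(src));
	printf("%zu bytes copied\n", chunked_read(dst, src, sizeof(src)));
	return 0;
}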
@@ -2538,7 +2530,7 @@ static const struct {
#endif #endif
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{ {
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
unsigned count; unsigned count;
@@ -2574,7 +2566,7 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
#endif #endif
} }
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev) void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{ {
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
unsigned i; unsigned i;


@@ -133,4 +133,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem); struct ttm_mem_reg *mem);
int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
#endif #endif


@@ -1099,7 +1099,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
goto err_free; goto err_free;
} else { } else {
r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv, r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
AMDGPU_FENCE_OWNER_UNDEFINED, false); AMDGPU_SYNC_ALWAYS,
AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) if (r)
goto err_free; goto err_free;


@@ -120,23 +120,17 @@ static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev, static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
unsigned level) unsigned level)
{ {
unsigned shift = 0xff;
switch (level) { switch (level) {
case AMDGPU_VM_PDB2: case AMDGPU_VM_PDB2:
case AMDGPU_VM_PDB1: case AMDGPU_VM_PDB1:
case AMDGPU_VM_PDB0: case AMDGPU_VM_PDB0:
shift = 9 * (AMDGPU_VM_PDB0 - level) + return 9 * (AMDGPU_VM_PDB0 - level) +
adev->vm_manager.block_size; adev->vm_manager.block_size;
break;
case AMDGPU_VM_PTB: case AMDGPU_VM_PTB:
shift = 0; return 0;
break;
default: default:
dev_err(adev->dev, "the level%d isn't supported.\n", level); return ~0;
} }
return shift;
} }
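With the early returns, the shift per level is just the formula in the first case plus the special cases for PTB and unknown levels. A quick check of the values this produces, assuming the common 9-bit block size (the enum ordering below is illustrative, not copied from the driver):

#include <stdio.h>

enum { VM_PDB2, VM_PDB1, VM_PDB0, VM_PTB };	/* illustrative ordering */

static unsigned int level_shift(int level, unsigned int block_size)
{
	switch (level) {
	case VM_PDB2:
	case VM_PDB1:
	case VM_PDB0:
		return 9 * (VM_PDB0 - level) + block_size;
	case VM_PTB:
		return 0;
	default:
		return ~0u;	/* clearly invalid, the caller has to notice */
	}
}

int main(void)
{
	/* With block_size = 9: PDB2 -> 27, PDB1 -> 18, PDB0 -> 9, PTB -> 0 */
	for (int level = VM_PDB2; level <= VM_PTB; level++)
		printf("level %d: shift %u\n", level, level_shift(level, 9));
	return 0;
}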
/** /**
@@ -235,19 +229,6 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
else else
list_move_tail(&vm_bo->vm_status, &vm->evicted); list_move_tail(&vm_bo->vm_status, &vm->evicted);
} }
/**
* amdgpu_vm_bo_relocated - vm_bo is relocated
*
* @vm_bo: vm_bo which is relocated
*
* State for PDs/PTs which needs to update their parent PD.
*/
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
}
/** /**
* amdgpu_vm_bo_moved - vm_bo is moved * amdgpu_vm_bo_moved - vm_bo is moved
* *
@@ -290,6 +271,22 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
spin_unlock(&vm_bo->vm->invalidated_lock); spin_unlock(&vm_bo->vm->invalidated_lock);
} }
/**
* amdgpu_vm_bo_relocated - vm_bo is relocated
*
* @vm_bo: vm_bo which is relocated
*
* State for PDs/PTs which needs to update their parent PD.
* For the root PD, just move to idle state.
*/
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
if (vm_bo->bo->parent)
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
else
amdgpu_vm_bo_idle(vm_bo);
}
/** /**
* amdgpu_vm_bo_done - vm_bo is done * amdgpu_vm_bo_done - vm_bo is done
* *
@@ -588,8 +585,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
{ {
entry->priority = 0; entry->priority = 0;
entry->tv.bo = &vm->root.base.bo->tbo; entry->tv.bo = &vm->root.base.bo->tbo;
/* One for TTM and one for the CS job */ /* Two for VM updates, one for TTM and one for the CS job */
entry->tv.num_shared = 2; entry->tv.num_shared = 4;
entry->user_pages = NULL; entry->user_pages = NULL;
list_add(&entry->tv.head, validated); list_add(&entry->tv.head, validated);
} }
@@ -697,10 +694,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
amdgpu_vm_bo_moved(bo_base); amdgpu_vm_bo_moved(bo_base);
} else { } else {
vm->update_funcs->map_table(bo); vm->update_funcs->map_table(bo);
if (bo->parent)
amdgpu_vm_bo_relocated(bo_base); amdgpu_vm_bo_relocated(bo_base);
else
amdgpu_vm_bo_idle(bo_base);
} }
} }
@@ -803,7 +797,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
params.vm = vm; params.vm = vm;
params.direct = direct; params.direct = direct;
r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL); r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r) if (r)
return r; return r;
@@ -1299,7 +1293,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
params.vm = vm; params.vm = vm;
params.direct = direct; params.direct = direct;
r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL); r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r) if (r)
return r; return r;
@@ -1448,21 +1442,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t incr, entry_end, pe_start; uint64_t incr, entry_end, pe_start;
struct amdgpu_bo *pt; struct amdgpu_bo *pt;
/* make sure that the page tables covering the address range are if (flags & AMDGPU_PTE_VALID) {
* actually allocated /* make sure that the page tables covering the
* address range are actually allocated
*/ */
r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor, r = amdgpu_vm_alloc_pts(params->adev, params->vm,
params->direct); &cursor, params->direct);
if (r) if (r)
return r; return r;
pt = cursor.entry->base.bo;
/* The root level can't be a huge page */
if (cursor.level == adev->vm_manager.root_level) {
if (!amdgpu_vm_pt_descendant(adev, &cursor))
return -ENOENT;
continue;
} }
shift = amdgpu_vm_level_shift(adev, cursor.level); shift = amdgpu_vm_level_shift(adev, cursor.level);
@@ -1480,25 +1467,38 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* smaller than the address shift. Go to the next * smaller than the address shift. Go to the next
* child entry and try again. * child entry and try again.
*/ */
if (!amdgpu_vm_pt_descendant(adev, &cursor)) if (amdgpu_vm_pt_descendant(adev, &cursor))
return -ENOENT;
continue; continue;
} else if (frag >= parent_shift && } else if (frag >= parent_shift) {
cursor.level - 1 != adev->vm_manager.root_level) {
/* If the fragment size is even larger than the parent /* If the fragment size is even larger than the parent
* shift we should go up one level and check it again * shift we should go up one level and check it again.
* unless one level up is the root level.
*/ */
if (!amdgpu_vm_pt_ancestor(&cursor)) if (!amdgpu_vm_pt_ancestor(&cursor))
return -ENOENT; return -EINVAL;
continue; continue;
} }
pt = cursor.entry->base.bo;
if (!pt) {
/* We need all PDs and PTs for mapping something, */
if (flags & AMDGPU_PTE_VALID)
return -ENOENT;
/* but unmapping something can happen at a higher
* level.
*/
if (!amdgpu_vm_pt_ancestor(&cursor))
return -EINVAL;
pt = cursor.entry->base.bo;
shift = parent_shift;
}
/* Looks good so far, calculate parameters for the update */ /* Looks good so far, calculate parameters for the update */
incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift; incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
mask = amdgpu_vm_entries_mask(adev, cursor.level); mask = amdgpu_vm_entries_mask(adev, cursor.level);
pe_start = ((cursor.pfn >> shift) & mask) * 8; pe_start = ((cursor.pfn >> shift) & mask) * 8;
entry_end = (uint64_t)(mask + 1) << shift; entry_end = ((uint64_t)mask + 1) << shift;
entry_end += cursor.pfn & ~(entry_end - 1); entry_end += cursor.pfn & ~(entry_end - 1);
entry_end = min(entry_end, end); entry_end = min(entry_end, end);
@@ -1506,6 +1506,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t upd_end = min(entry_end, frag_end); uint64_t upd_end = min(entry_end, frag_end);
unsigned nptes = (upd_end - frag_start) >> shift; unsigned nptes = (upd_end - frag_start) >> shift;
/* This can happen when we set higher level PDs to
* silent to stop fault floods.
*/
nptes = max(nptes, 1u);
amdgpu_vm_update_flags(params, pt, cursor.level, amdgpu_vm_update_flags(params, pt, cursor.level,
pe_start, dst, nptes, incr, pe_start, dst, nptes, incr,
flags | AMDGPU_PTE_FRAG(frag)); flags | AMDGPU_PTE_FRAG(frag));
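The !pt branch a few lines up encodes the key asymmetry of the reworked walk: a mapping needs every PD/PT on the way down, while an unmapping can simply clear the larger parent entry when a table was never allocated. A toy decision helper capturing that rule (the names are mine, not the driver's):

#include <stdbool.h>
#include <stdio.h>

enum walk_action { WALK_USE_ENTRY, WALK_USE_PARENT, WALK_FAIL };

static enum walk_action on_missing_pt(bool mapping, bool has_parent)
{
	if (mapping)
		return WALK_FAIL;	/* all PDs/PTs are required for a map */
	if (!has_parent)
		return WALK_FAIL;	/* nothing above the root to fall back to */
	return WALK_USE_PARENT;		/* unmap clears the parent entry instead */
}

int main(void)
{
	printf("%d %d %d\n",
	       on_missing_pt(true, true),	/* WALK_FAIL */
	       on_missing_pt(false, true),	/* WALK_USE_PARENT */
	       on_missing_pt(false, false));	/* WALK_FAIL */
	return 0;
}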
@@ -1550,7 +1554,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* @adev: amdgpu_device pointer * @adev: amdgpu_device pointer
* @vm: requested vm * @vm: requested vm
* @direct: direct submission in a page fault * @direct: direct submission in a page fault
* @exclusive: fence we need to sync to * @resv: fences we need to sync to
* @start: start of mapped range * @start: start of mapped range
* @last: last mapped entry * @last: last mapped entry
* @flags: flags for the entries * @flags: flags for the entries
@@ -1565,14 +1569,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
*/ */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct amdgpu_vm *vm, bool direct, struct amdgpu_vm *vm, bool direct,
struct dma_fence *exclusive, struct dma_resv *resv,
uint64_t start, uint64_t last, uint64_t start, uint64_t last,
uint64_t flags, uint64_t addr, uint64_t flags, uint64_t addr,
dma_addr_t *pages_addr, dma_addr_t *pages_addr,
struct dma_fence **fence) struct dma_fence **fence)
{ {
struct amdgpu_vm_update_params params; struct amdgpu_vm_update_params params;
void *owner = AMDGPU_FENCE_OWNER_VM; enum amdgpu_sync_mode sync_mode;
int r; int r;
memset(&params, 0, sizeof(params)); memset(&params, 0, sizeof(params));
@@ -1581,9 +1585,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
params.direct = direct; params.direct = direct;
params.pages_addr = pages_addr; params.pages_addr = pages_addr;
/* sync to everything except eviction fences on unmapping */ /* Implicitly sync to command submissions in the same VM before
* unmapping. Sync to moving fences before mapping.
*/
if (!(flags & AMDGPU_PTE_VALID)) if (!(flags & AMDGPU_PTE_VALID))
owner = AMDGPU_FENCE_OWNER_KFD; sync_mode = AMDGPU_SYNC_EQ_OWNER;
else
sync_mode = AMDGPU_SYNC_EXPLICIT;
amdgpu_vm_eviction_lock(vm); amdgpu_vm_eviction_lock(vm);
if (vm->evicting) { if (vm->evicting) {
@@ -1591,7 +1599,17 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
goto error_unlock; goto error_unlock;
} }
r = vm->update_funcs->prepare(&params, owner, exclusive); if (flags & AMDGPU_PTE_VALID) {
struct amdgpu_bo *root = vm->root.base.bo;
if (!dma_fence_is_signaled(vm->last_direct))
amdgpu_bo_fence(root, vm->last_direct, true);
if (!dma_fence_is_signaled(vm->last_delayed))
amdgpu_bo_fence(root, vm->last_delayed, true);
}
r = vm->update_funcs->prepare(&params, resv, sync_mode);
if (r) if (r)
goto error_unlock; goto error_unlock;
@@ -1610,7 +1628,7 @@ error_unlock:
* amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
* *
* @adev: amdgpu_device pointer * @adev: amdgpu_device pointer
* @exclusive: fence we need to sync to * @resv: fences we need to sync to
* @pages_addr: DMA addresses to use for mapping * @pages_addr: DMA addresses to use for mapping
* @vm: requested vm * @vm: requested vm
* @mapping: mapped range and flags to use for the update * @mapping: mapped range and flags to use for the update
@@ -1626,7 +1644,7 @@ error_unlock:
* 0 for success, -EINVAL for failure. * 0 for success, -EINVAL for failure.
*/ */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct dma_fence *exclusive, struct dma_resv *resv,
dma_addr_t *pages_addr, dma_addr_t *pages_addr,
struct amdgpu_vm *vm, struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping, struct amdgpu_bo_va_mapping *mapping,
@@ -1702,7 +1720,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
} }
last = min((uint64_t)mapping->last, start + max_entries - 1); last = min((uint64_t)mapping->last, start + max_entries - 1);
r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive, r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
start, last, flags, addr, start, last, flags, addr,
dma_addr, fence); dma_addr, fence);
if (r) if (r)
@@ -1741,7 +1759,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
dma_addr_t *pages_addr = NULL; dma_addr_t *pages_addr = NULL;
struct ttm_mem_reg *mem; struct ttm_mem_reg *mem;
struct drm_mm_node *nodes; struct drm_mm_node *nodes;
struct dma_fence *exclusive, **last_update; struct dma_fence **last_update;
struct dma_resv *resv;
uint64_t flags; uint64_t flags;
struct amdgpu_device *bo_adev = adev; struct amdgpu_device *bo_adev = adev;
int r; int r;
@@ -1749,7 +1768,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
if (clear || !bo) { if (clear || !bo) {
mem = NULL; mem = NULL;
nodes = NULL; nodes = NULL;
exclusive = NULL; resv = vm->root.base.bo->tbo.base.resv;
} else { } else {
struct ttm_dma_tt *ttm; struct ttm_dma_tt *ttm;
@@ -1759,7 +1778,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address; pages_addr = ttm->dma_address;
} }
exclusive = bo->tbo.moving; resv = bo->tbo.base.resv;
} }
if (bo) { if (bo) {
@@ -1769,7 +1788,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
flags = 0x0; flags = 0x0;
} }
if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)) if (clear || (bo && bo->tbo.base.resv ==
vm->root.base.bo->tbo.base.resv))
last_update = &vm->last_update; last_update = &vm->last_update;
else else
last_update = &bo_va->last_pt_update; last_update = &bo_va->last_pt_update;
@@ -1783,7 +1803,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
} }
list_for_each_entry(mapping, &bo_va->invalids, list) { list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm, r = amdgpu_vm_bo_split_mapping(adev, resv, pages_addr, vm,
mapping, flags, bo_adev, nodes, mapping, flags, bo_adev, nodes,
last_update); last_update);
if (r) if (r)
@@ -1978,6 +1998,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm, struct amdgpu_vm *vm,
struct dma_fence **fence) struct dma_fence **fence)
{ {
struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
struct amdgpu_bo_va_mapping *mapping; struct amdgpu_bo_va_mapping *mapping;
uint64_t init_pte_value = 0; uint64_t init_pte_value = 0;
struct dma_fence *f = NULL; struct dma_fence *f = NULL;
@@ -1992,7 +2013,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
mapping->start < AMDGPU_GMC_HOLE_START) mapping->start < AMDGPU_GMC_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC; init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL, r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
mapping->start, mapping->last, mapping->start, mapping->last,
init_pte_value, 0, NULL, &f); init_pte_value, 0, NULL, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f); amdgpu_vm_free_mapping(adev, vm, mapping, f);


@@ -227,8 +227,8 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm_update_funcs { struct amdgpu_vm_update_funcs {
int (*map_table)(struct amdgpu_bo *bo); int (*map_table)(struct amdgpu_bo *bo);
int (*prepare)(struct amdgpu_vm_update_params *p, void * owner, int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
struct dma_fence *exclusive); enum amdgpu_sync_mode sync_mode);
int (*update)(struct amdgpu_vm_update_params *p, int (*update)(struct amdgpu_vm_update_params *p,
struct amdgpu_bo *bo, uint64_t pe, uint64_t addr, struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr, uint64_t flags); unsigned count, uint32_t incr, uint64_t flags);


@@ -44,26 +44,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
* Returns: * Returns:
* Negative errno, 0 for success. * Negative errno, 0 for success.
*/ */
static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner, static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
struct dma_fence *exclusive) struct dma_resv *resv,
enum amdgpu_sync_mode sync_mode)
{ {
int r; if (!resv)
/* Wait for any BO move to be completed */
if (exclusive) {
r = dma_fence_wait(exclusive, true);
if (unlikely(r))
return r;
}
/* Don't wait for submissions during page fault */
if (p->direct)
return 0; return 0;
/* Wait for PT BOs to be idle. PTs share the same resv. object return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true);
* as the root PD BO
*/
return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
} }
/** /**
@@ -86,6 +74,13 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
{ {
unsigned int i; unsigned int i;
uint64_t value; uint64_t value;
int r;
if (bo->tbo.moving) {
r = dma_fence_wait(bo->tbo.moving, true);
if (r)
return r;
}
pe += (unsigned long)amdgpu_bo_kptr(bo); pe += (unsigned long)amdgpu_bo_kptr(bo);


@@ -58,9 +58,9 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
* Negative errno, 0 for success. * Negative errno, 0 for success.
*/ */
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
void *owner, struct dma_fence *exclusive) struct dma_resv *resv,
enum amdgpu_sync_mode sync_mode)
{ {
struct amdgpu_bo *root = p->vm->root.base.bo;
unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW; unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
int r; int r;
@@ -70,17 +70,10 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
p->num_dw_left = ndw; p->num_dw_left = ndw;
/* Wait for moves to be completed */ if (!resv)
r = amdgpu_sync_fence(&p->job->sync, exclusive, false);
if (r)
return r;
/* Don't wait for any submissions during page fault handling */
if (p->direct)
return 0; return 0;
return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv, return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
owner, false);
} }
/** /**
@@ -147,7 +140,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
src += p->num_dw_left * 4; src += p->num_dw_left * 4;
pe += amdgpu_bo_gpu_offset(bo); pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct); trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
amdgpu_vm_copy_pte(p->adev, ib, pe, src, count); amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
@@ -174,7 +167,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
{ {
struct amdgpu_ib *ib = p->job->ibs; struct amdgpu_ib *ib = p->job->ibs;
pe += amdgpu_bo_gpu_offset(bo); pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct); trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
if (count < 3) { if (count < 3) {
amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags, amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
@@ -208,6 +201,11 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
uint64_t *pte; uint64_t *pte;
int r; int r;
/* Wait for PD/PT moves to be completed */
r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving, false);
if (r)
return r;
do { do {
ndw = p->num_dw_left; ndw = p->num_dw_left;
ndw -= p->job->ibs->length_dw; ndw -= p->job->ibs->length_dw;
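Both PTE helpers above now run the BO offset through amdgpu_gmc_sign_extend() instead of amdgpu_bo_gpu_offset(), so addresses in the high half of the MC space keep their upper bits. A standalone illustration of 48-bit sign extension (the width is an assumption for the example; it relies on arithmetic right shift, as on typical compilers):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Sign-extend a 48-bit machine address into a canonical 64-bit value. */
static uint64_t sign_extend48(uint64_t addr)
{
	return (uint64_t)(((int64_t)(addr << 16)) >> 16);
}

int main(void)
{
	printf("0x%016" PRIx64 "\n", sign_extend48(0x0000800000000000ull));
	/* prints 0xffff800000000000 */
	printf("0x%016" PRIx64 "\n", sign_extend48(0x0000000012345000ull));
	/* prints 0x0000000012345000 */
	return 0;
}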


@@ -365,6 +365,13 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
return 0; return 0;
if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
ret = psp_xgmi_initialize(&adev->psp);
if (ret) {
dev_err(adev->dev,
"XGMI: Failed to initialize xgmi session\n");
return ret;
}
ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id); ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
if (ret) { if (ret) {
dev_err(adev->dev, dev_err(adev->dev,
@@ -451,16 +458,16 @@ exit:
return ret; return ret;
} }
void amdgpu_xgmi_remove_device(struct amdgpu_device *adev) int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{ {
struct amdgpu_hive_info *hive; struct amdgpu_hive_info *hive;
if (!adev->gmc.xgmi.supported) if (!adev->gmc.xgmi.supported)
return; return -EINVAL;
hive = amdgpu_get_xgmi_hive(adev, 1); hive = amdgpu_get_xgmi_hive(adev, 1);
if (!hive) if (!hive)
return; return -EINVAL;
if (!(hive->number_devices--)) { if (!(hive->number_devices--)) {
amdgpu_xgmi_sysfs_destroy(adev, hive); amdgpu_xgmi_sysfs_destroy(adev, hive);
@@ -471,6 +478,8 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
amdgpu_xgmi_sysfs_rem_dev_info(adev, hive); amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
mutex_unlock(&hive->hive_lock); mutex_unlock(&hive->hive_lock);
} }
return psp_xgmi_terminate(&adev->psp);
} }
int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev) int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
@@ -521,3 +530,33 @@ void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
kfree(ras_if); kfree(ras_if);
} }
} }
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
uint64_t addr)
{
uint32_t df_inst_id;
uint64_t dram_base_addr = 0;
const struct amdgpu_df_funcs *df_funcs = adev->df.funcs;
if ((!df_funcs) ||
(!df_funcs->get_df_inst_id) ||
(!df_funcs->get_dram_base_addr)) {
dev_warn(adev->dev,
"XGMI: relative phy_addr algorithm is not supported\n");
return addr;
}
if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) {
dev_warn(adev->dev,
"failed to disable DF-Cstate, DF register may not be accessible\n");
return addr;
}
df_inst_id = df_funcs->get_df_inst_id(adev);
dram_base_addr = df_funcs->get_dram_base_addr(adev, df_inst_id);
if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
dev_warn(adev->dev, "failed to enable DF-Cstate\n");
return addr + dram_base_addr;
}
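The helper above brackets the DF register reads with a C-state disallow/allow pair so the base-address register is actually readable, and falls back to the unmodified address when the DF callbacks are missing. A compressed model of that flow (stubbed helpers, the 2 GiB base is an assumption for the example):

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool df_cstate_disallow(void) { return true; }
static void df_cstate_allow(void) { }
static uint64_t df_dram_base_addr(void) { return 2ull << 30; }

static uint64_t relative_phys_addr(uint64_t addr)
{
	if (!df_cstate_disallow())
		return addr;	/* register may not be accessible, give up */

	uint64_t base = df_dram_base_addr();

	df_cstate_allow();
	return addr + base;
}

int main(void)
{
	printf("0x%" PRIx64 "\n", relative_phys_addr(0x1000));	/* 0x80001000 */
	return 0;
}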


@@ -40,12 +40,14 @@ struct amdgpu_hive_info {
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock); struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev); int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
int amdgpu_xgmi_add_device(struct amdgpu_device *adev); int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
void amdgpu_xgmi_remove_device(struct amdgpu_device *adev); int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate); int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev, int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev); struct amdgpu_device *peer_adev);
int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev); int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev);
void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev); void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev);
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
uint64_t addr);
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev, static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
struct amdgpu_device *bo_adev) struct amdgpu_device *bo_adev)


@@ -186,16 +186,10 @@ amdgpu_atombios_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *m
void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector) void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector)
{ {
int ret;
amdgpu_connector->ddc_bus->rec.hpd = amdgpu_connector->hpd.hpd; amdgpu_connector->ddc_bus->rec.hpd = amdgpu_connector->hpd.hpd;
amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
amdgpu_connector->ddc_bus->aux.transfer = amdgpu_atombios_dp_aux_transfer; amdgpu_connector->ddc_bus->aux.transfer = amdgpu_atombios_dp_aux_transfer;
ret = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux); drm_dp_aux_init(&amdgpu_connector->ddc_bus->aux);
if (!ret)
amdgpu_connector->ddc_bus->has_aux = true; amdgpu_connector->ddc_bus->has_aux = true;
WARN(ret, "drm_dp_aux_register_i2c_bus() failed with error %d\n", ret);
} }
/***** general DP utility functions *****/ /***** general DP utility functions *****/


@@ -614,7 +614,6 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
connector->display_info.subpixel_order = SubPixelHorizontalRGB; connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false; connector->interlace_allowed = false;
connector->doublescan_allowed = false; connector->doublescan_allowed = false;
drm_connector_register(connector);
/* link them */ /* link them */
drm_connector_attach_encoder(connector, encoder); drm_connector_attach_encoder(connector, encoder);


@@ -3164,12 +3164,7 @@ static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev)
for (i = 0; i < adev->gfx.num_gfx_rings; i++) for (i = 0; i < adev->gfx.num_gfx_rings; i++)
kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]); kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
r = amdgpu_ring_test_ring(kiq_ring); return amdgpu_ring_test_helper(kiq_ring);
if (r) {
DRM_ERROR("kfq enable failed\n");
kiq_ring->sched.ready = false;
}
return r;
} }
#endif #endif
@@ -3785,7 +3780,7 @@ static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i], kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
PREEMPT_QUEUES, 0, 0); PREEMPT_QUEUES, 0, 0);
return amdgpu_ring_test_ring(kiq_ring); return amdgpu_ring_test_helper(kiq_ring);
} }
#endif #endif
@@ -3923,11 +3918,13 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{ {
uint64_t clock; uint64_t clock;
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex); mutex_lock(&adev->gfx.gpu_clock_mutex);
WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&adev->gfx.gpu_clock_mutex); mutex_unlock(&adev->gfx.gpu_clock_mutex);
amdgpu_gfx_off_ctrl(adev, true);
return clock; return clock;
} }
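Keeping GFXOFF disabled for the duration of the capture is what makes the two RLC counter reads trustworthy; the 64-bit value itself is just the two halves combined. A trivial sketch with the register reads stubbed out:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for RREG32_SOC15(..._LSB/_MSB); the real code reads MMIO here. */
static uint32_t read_clock_lsb(void) { return 0x89abcdef; }
static uint32_t read_clock_msb(void) { return 0x01234567; }

int main(void)
{
	uint64_t clock = (uint64_t)read_clock_lsb() |
			 ((uint64_t)read_clock_msb() << 32);

	printf("0x%016" PRIx64 "\n", clock);	/* 0x0123456789abcdef */
	return 0;
}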
@@ -4411,7 +4408,7 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
control |= ib->length_dw | (vmid << 24); control |= ib->length_dw | (vmid << 24);
if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { if ((amdgpu_sriov_vf(ring->adev) || amdgpu_mcbp) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1); control |= INDIRECT_BUFFER_PRE_ENB(1);
if (flags & AMDGPU_IB_PREEMPTED) if (flags & AMDGPU_IB_PREEMPTED)
@@ -4419,7 +4416,7 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
if (!(ib->flags & AMDGPU_IB_FLAG_CE)) if (!(ib->flags & AMDGPU_IB_FLAG_CE))
gfx_v10_0_ring_emit_de_meta(ring, gfx_v10_0_ring_emit_de_meta(ring,
flags & AMDGPU_IB_PREEMPTED ? true : false); (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
} }
amdgpu_ring_write(ring, header); amdgpu_ring_write(ring, header);
@@ -4566,9 +4563,9 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flag
{ {
uint32_t dw2 = 0; uint32_t dw2 = 0;
if (amdgpu_mcbp) if (amdgpu_mcbp || amdgpu_sriov_vf(ring->adev))
gfx_v10_0_ring_emit_ce_meta(ring, gfx_v10_0_ring_emit_ce_meta(ring,
flags & AMDGPU_IB_PREEMPTED ? true : false); (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
gfx_v10_0_ring_emit_tmz(ring, true); gfx_v10_0_ring_emit_tmz(ring, true);


@@ -1106,10 +1106,11 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
adev->gfx.me_fw_write_wait = false; adev->gfx.me_fw_write_wait = false;
adev->gfx.mec_fw_write_wait = false; adev->gfx.mec_fw_write_wait = false;
if ((adev->gfx.mec_fw_version < 0x000001a5) || if ((adev->asic_type != CHIP_ARCTURUS) &&
((adev->gfx.mec_fw_version < 0x000001a5) ||
(adev->gfx.mec_feature_version < 46) || (adev->gfx.mec_feature_version < 46) ||
(adev->gfx.pfp_fw_version < 0x000000b7) || (adev->gfx.pfp_fw_version < 0x000000b7) ||
(adev->gfx.pfp_feature_version < 46)) (adev->gfx.pfp_feature_version < 46)))
DRM_WARN_ONCE("CP firmware version too old, please update!"); DRM_WARN_ONCE("CP firmware version too old, please update!");
switch (adev->asic_type) { switch (adev->asic_type) {
@@ -1193,6 +1194,14 @@ static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
return false; return false;
} }
static bool is_raven_kicker(struct amdgpu_device *adev)
{
if (adev->pm.fw_version >= 0x41e2b)
return true;
else
return false;
}
static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{ {
if (gfx_v9_0_should_disable_gfxoff(adev->pdev)) if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
@@ -1205,9 +1214,8 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
break; break;
case CHIP_RAVEN: case CHIP_RAVEN:
if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) && if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
((adev->gfx.rlc_fw_version != 106 && ((!is_raven_kicker(adev) &&
adev->gfx.rlc_fw_version < 531) || adev->gfx.rlc_fw_version < 531) ||
(adev->gfx.rlc_fw_version == 53815) ||
(adev->gfx.rlc_feature_version < 1) || (adev->gfx.rlc_feature_version < 1) ||
!adev->gfx.rlc.is_rlc_v2_1)) !adev->gfx.rlc.is_rlc_v2_1))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK; adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
@@ -3959,6 +3967,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{ {
uint64_t clock; uint64_t clock;
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex); mutex_lock(&adev->gfx.gpu_clock_mutex);
if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) { if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
uint32_t tmp, lsb, msb, i = 0; uint32_t tmp, lsb, msb, i = 0;
@@ -3977,6 +3986,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
} }
mutex_unlock(&adev->gfx.gpu_clock_mutex); mutex_unlock(&adev->gfx.gpu_clock_mutex);
amdgpu_gfx_off_ctrl(adev, true);
return clock; return clock;
} }


@@ -135,6 +135,10 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{ {
uint32_t tmp; uint32_t tmp;
/* These regs are not accessible for VF, PF will program these in SRIOV */
if (amdgpu_sriov_vf(adev))
return;
/* Setup L2 cache */ /* Setup L2 cache */
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL); tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1); tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -298,9 +302,11 @@ void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 0); ENABLE_ADVANCED_DRIVER_MODEL, 0);
WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp); WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp);
if (!amdgpu_sriov_vf(adev)) {
/* Setup L2 cache */ /* Setup L2 cache */
WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0); WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0); WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0);
}
} }
/** /**


@@ -476,13 +476,26 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
{ {
bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
const unsigned eng = 17; const unsigned eng = 17;
u32 j, inv_req, tmp; u32 j, inv_req, inv_req2, tmp;
struct amdgpu_vmhub *hub; struct amdgpu_vmhub *hub;
BUG_ON(vmhub >= adev->num_vmhubs); BUG_ON(vmhub >= adev->num_vmhubs);
hub = &adev->vmhub[vmhub]; hub = &adev->vmhub[vmhub];
if (adev->gmc.xgmi.num_physical_nodes &&
adev->asic_type == CHIP_VEGA20) {
/* Vega20+XGMI caches PTEs in TC and TLB. Add a
* heavy-weight TLB flush (type 2), which flushes
* both. Due to a race condition with concurrent
* memory accesses using the same TLB cache line, we
* still need a second TLB flush after this.
*/
inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
} else {
inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type); inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
inv_req2 = 0;
}
/* This is necessary for a HW workaround under SRIOV as well /* This is necessary for a HW workaround under SRIOV as well
* as GFXOFF under bare metal * as GFXOFF under bare metal
@@ -521,11 +534,13 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
} }
do {
WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req); WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
/* /*
* Issue a dummy read to wait for the ACK register to be cleared * Issue a dummy read to wait for the ACK register to
* to avoid a false ACK due to the new fast GRBM interface. * be cleared to avoid a false ACK due to the new fast
* GRBM interface.
*/ */
if (vmhub == AMDGPU_GFXHUB_0) if (vmhub == AMDGPU_GFXHUB_0)
RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng); RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);
@@ -537,6 +552,10 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
udelay(1); udelay(1);
} }
inv_req = inv_req2;
inv_req2 = 0;
} while (inv_req);
/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
if (use_semaphore) if (use_semaphore)
/* /*
@@ -577,9 +596,26 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
return -EIO; return -EIO;
if (ring->sched.ready) { if (ring->sched.ready) {
/* Vega20+XGMI caches PTEs in TC and TLB. Add a
* heavy-weight TLB flush (type 2), which flushes
* both. Due to a race condition with concurrent
* memory accesses using the same TLB cache line, we
* still need a second TLB flush after this.
*/
bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
adev->asic_type == CHIP_VEGA20);
/* 2 dwords flush + 8 dwords fence */
unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;
if (vega20_xgmi_wa)
ndw += kiq->pmf->invalidate_tlbs_size;
spin_lock(&adev->gfx.kiq.ring_lock); spin_lock(&adev->gfx.kiq.ring_lock);
/* 2 dwords flush + 8 dwords fence */ /* 2 dwords flush + 8 dwords fence */
amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8); amdgpu_ring_alloc(ring, ndw);
if (vega20_xgmi_wa)
kiq->pmf->kiq_invalidate_tlbs(ring,
pasid, 2, all_hub);
kiq->pmf->kiq_invalidate_tlbs(ring, kiq->pmf->kiq_invalidate_tlbs(ring,
pasid, flush_type, all_hub); pasid, flush_type, all_hub);
amdgpu_fence_emit_polling(ring, &seq); amdgpu_fence_emit_polling(ring, &seq);
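On Vega20 parts with XGMI both invalidation paths now emit two requests: the heavy-weight type 2 flush first, then the flush type that was actually asked for. A compact model of that ordering (illustrative only, a printf stands in for writing the invalidate request):

#include <stdbool.h>
#include <stdio.h>

/* -1 marks "no further request pending". */
static void flush_tlb(int flush_type, bool vega20_xgmi_wa)
{
	int req = vega20_xgmi_wa ? 2 : flush_type;
	int req2 = vega20_xgmi_wa ? flush_type : -1;

	do {
		printf("emit invalidate request, type %d\n", req);
		req = req2;
		req2 = -1;
	} while (req >= 0);
}

int main(void)
{
	flush_tlb(0, true);	/* two requests: type 2, then type 0 */
	flush_tlb(0, false);	/* a single request: type 0 */
	return 0;
}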
@@ -1271,6 +1307,19 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
} }
} }
/**
* gmc_v9_0_restore_registers - restores regs
*
* @adev: amdgpu_device pointer
*
* This restores register values, saved at suspend.
*/
static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{
if (adev->asic_type == CHIP_RAVEN)
WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
}
/** /**
* gmc_v9_0_gart_enable - gart enable * gmc_v9_0_gart_enable - gart enable
* *
@@ -1376,6 +1425,20 @@ static int gmc_v9_0_hw_init(void *handle)
return r; return r;
} }
/**
* gmc_v9_0_save_registers - saves regs
*
* @adev: amdgpu_device pointer
*
* This saves potential register values that should be
* restored upon resume
*/
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
if (adev->asic_type == CHIP_RAVEN)
adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}
/** /**
* gmc_v9_0_gart_disable - gart disable * gmc_v9_0_gart_disable - gart disable
* *
@@ -1412,9 +1475,16 @@ static int gmc_v9_0_hw_fini(void *handle)
static int gmc_v9_0_suspend(void *handle) static int gmc_v9_0_suspend(void *handle)
{ {
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return gmc_v9_0_hw_fini(adev); r = gmc_v9_0_hw_fini(adev);
if (r)
return r;
gmc_v9_0_save_registers(adev);
return 0;
} }
static int gmc_v9_0_resume(void *handle) static int gmc_v9_0_resume(void *handle)
@@ -1422,6 +1492,7 @@ static int gmc_v9_0_resume(void *handle)
int r; int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v9_0_restore_registers(adev);
r = gmc_v9_0_hw_init(adev); r = gmc_v9_0_hw_init(adev);
if (r) if (r)
return r; return r;


@@ -318,6 +318,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
{ {
uint32_t bif_doorbell_intr_cntl; uint32_t bif_doorbell_intr_cntl;
struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if); struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
struct ras_err_data err_data = {0, 0, 0, NULL};
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
if (REG_GET_FIELD(bif_doorbell_intr_cntl, if (REG_GET_FIELD(bif_doorbell_intr_cntl,
@@ -332,7 +333,19 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
* clear error status after ras_controller_intr according to * clear error status after ras_controller_intr according to
* hw team and count ue number for query * hw team and count ue number for query
*/ */
nbio_v7_4_query_ras_error_count(adev, &obj->err_data); nbio_v7_4_query_ras_error_count(adev, &err_data);
/* logging on error counter and printing for awareness */
obj->err_data.ue_count += err_data.ue_count;
obj->err_data.ce_count += err_data.ce_count;
if (err_data.ce_count)
DRM_INFO("%ld correctable errors detected in %s block\n",
obj->err_data.ce_count, adev->nbio.ras_if->name);
if (err_data.ue_count)
DRM_INFO("%ld uncorrectable errors detected in %s block\n",
obj->err_data.ue_count, adev->nbio.ras_if->name);
DRM_WARN("RAS controller interrupt triggered by NBIF error\n"); DRM_WARN("RAS controller interrupt triggered by NBIF error\n");


@@ -420,6 +420,7 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
struct psp_ring *ring; struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev; struct amdgpu_device *adev = psp->adev;
if (!amdgpu_sriov_vf(adev))
psp_v11_0_reroute_ih(psp); psp_v11_0_reroute_ih(psp);
ring = &psp->km_ring; ring = &psp->km_ring;


@@ -746,11 +746,9 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
sdma_v5_0_enable(adev, true); sdma_v5_0_enable(adev, true);
} }
r = amdgpu_ring_test_ring(ring); r = amdgpu_ring_test_helper(ring);
if (r) { if (r)
ring->sched.ready = false;
return r; return r;
}
if (adev->mman.buffer_funcs_ring == ring) if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true); amdgpu_ttm_set_buffer_funcs_status(adev, true);


@@ -272,7 +272,12 @@ static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
static u32 soc15_get_xclk(struct amdgpu_device *adev) static u32 soc15_get_xclk(struct amdgpu_device *adev)
{ {
return adev->clock.spll.reference_freq; u32 reference_clock = adev->clock.spll.reference_freq;
if (adev->asic_type == CHIP_RAVEN)
return reference_clock / 4;
return reference_clock;
} }


@@ -127,6 +127,8 @@ static int kfd_open(struct inode *inode, struct file *filep)
return PTR_ERR(process); return PTR_ERR(process);
if (kfd_is_locked()) { if (kfd_is_locked()) {
dev_dbg(kfd_device, "kfd is locked!\n"
"process %d unreferenced", process->pasid);
kfd_unref_process(process); kfd_unref_process(process);
return -EAGAIN; return -EAGAIN;
} }


@@ -648,6 +648,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
if (kfd->kfd2kgd->get_hive_id) if (kfd->kfd2kgd->get_hive_id)
kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd); kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd);
if (kfd->kfd2kgd->get_unique_id)
kfd->unique_id = kfd->kfd2kgd->get_unique_id(kfd->kgd);
if (kfd_interrupt_init(kfd)) { if (kfd_interrupt_init(kfd)) {
dev_err(kfd_device, "Error initializing interrupts\n"); dev_err(kfd_device, "Error initializing interrupts\n");
goto kfd_interrupt_error; goto kfd_interrupt_error;
@@ -710,7 +713,7 @@ out:
void kgd2kfd_device_exit(struct kfd_dev *kfd) void kgd2kfd_device_exit(struct kfd_dev *kfd)
{ {
if (kfd->init_complete) { if (kfd->init_complete) {
kgd2kfd_suspend(kfd); kgd2kfd_suspend(kfd, false);
device_queue_manager_uninit(kfd->dqm); device_queue_manager_uninit(kfd->dqm);
kfd_interrupt_exit(kfd); kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd); kfd_topology_remove_device(kfd);
@@ -731,7 +734,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
kfd->dqm->ops.pre_reset(kfd->dqm); kfd->dqm->ops.pre_reset(kfd->dqm);
kgd2kfd_suspend(kfd); kgd2kfd_suspend(kfd, false);
kfd_signal_reset_event(kfd); kfd_signal_reset_event(kfd);
return 0; return 0;
@@ -765,21 +768,23 @@ bool kfd_is_locked(void)
return (atomic_read(&kfd_locked) > 0); return (atomic_read(&kfd_locked) > 0);
} }
void kgd2kfd_suspend(struct kfd_dev *kfd) void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{ {
if (!kfd->init_complete) if (!kfd->init_complete)
return; return;
/* for runtime suspend, skip locking kfd */
if (!run_pm) {
/* For first KFD device suspend all the KFD processes */ /* For first KFD device suspend all the KFD processes */
if (atomic_inc_return(&kfd_locked) == 1) if (atomic_inc_return(&kfd_locked) == 1)
kfd_suspend_all_processes(); kfd_suspend_all_processes();
}
kfd->dqm->ops.stop(kfd->dqm); kfd->dqm->ops.stop(kfd->dqm);
kfd_iommu_suspend(kfd); kfd_iommu_suspend(kfd);
} }
int kgd2kfd_resume(struct kfd_dev *kfd) int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{ {
int ret, count; int ret, count;
@@ -790,10 +795,13 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
if (ret) if (ret)
return ret; return ret;
/* for runtime resume, skip unlocking kfd */
if (!run_pm) {
count = atomic_dec_return(&kfd_locked); count = atomic_dec_return(&kfd_locked);
WARN_ONCE(count < 0, "KFD suspend / resume ref. error"); WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
if (count == 0) if (count == 0)
ret = kfd_resume_all_processes(); ret = kfd_resume_all_processes();
}
return ret; return ret;
} }
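The run_pm flag is what keeps runtime power management from evicting user processes: only system suspend/resume touches the kfd_locked counter, while runtime transitions just stop and restart the queues. A compact model of that gating (the counter is a plain int here instead of the driver's atomic):

#include <stdbool.h>
#include <stdio.h>

static int kfd_locked;	/* stand-in for the driver's atomic counter */

static void suspend(bool run_pm)
{
	if (!run_pm && ++kfd_locked == 1)
		printf("first suspend: evict all KFD processes\n");
	printf("stop queues, suspend IOMMU\n");
}

static void resume(bool run_pm)
{
	printf("restart queues, resume IOMMU\n");
	if (!run_pm && --kfd_locked == 0)
		printf("last resume: restore all KFD processes\n");
}

int main(void)
{
	suspend(true);	/* runtime PM: processes stay resident */
	resume(true);
	suspend(false);	/* system suspend: processes are evicted */
	resume(false);
	return 0;
}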


@@ -78,14 +78,14 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
/* queue is available for KFD usage if bit is 1 */ /* queue is available for KFD usage if bit is 1 */
for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i) for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
if (test_bit(pipe_offset + i, if (test_bit(pipe_offset + i,
dqm->dev->shared_resources.queue_bitmap)) dqm->dev->shared_resources.cp_queue_bitmap))
return true; return true;
return false; return false;
} }
unsigned int get_queues_num(struct device_queue_manager *dqm) unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
{ {
return bitmap_weight(dqm->dev->shared_resources.queue_bitmap, return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
KGD_MAX_QUEUES); KGD_MAX_QUEUES);
} }
@@ -109,6 +109,11 @@ static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
return dqm->dev->device_info->num_xgmi_sdma_engines; return dqm->dev->device_info->num_xgmi_sdma_engines;
} }
static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
{
return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm);
}
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm) unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{ {
return dqm->dev->device_info->num_sdma_engines return dqm->dev->device_info->num_sdma_engines
@@ -132,6 +137,22 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
qpd->sh_mem_bases); qpd->sh_mem_bases);
} }
void increment_queue_count(struct device_queue_manager *dqm,
enum kfd_queue_type type)
{
dqm->active_queue_count++;
if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
dqm->active_cp_queue_count++;
}
void decrement_queue_count(struct device_queue_manager *dqm,
enum kfd_queue_type type)
{
dqm->active_queue_count--;
if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
dqm->active_cp_queue_count--;
}
static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q) static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{ {
struct kfd_dev *dev = qpd->dqm->dev; struct kfd_dev *dev = qpd->dqm->dev;
@@ -281,8 +302,6 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
struct mqd_manager *mqd_mgr; struct mqd_manager *mqd_mgr;
int retval; int retval;
print_queue(q);
dqm_lock(dqm); dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) { if (dqm->total_queue_count >= max_num_of_queues_per_device) {
@@ -359,12 +378,7 @@ add_queue_to_list:
list_add(&q->list, &qpd->queues_list); list_add(&q->list, &qpd->queues_list);
qpd->queue_count++; qpd->queue_count++;
if (q->properties.is_active) if (q->properties.is_active)
dqm->queue_count++; increment_queue_count(dqm, q->properties.type);
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
dqm->sdma_queue_count++;
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
dqm->xgmi_sdma_queue_count++;
/* /*
* Unconditionally increment this counter, regardless of the queue's * Unconditionally increment this counter, regardless of the queue's
@@ -446,15 +460,13 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)]; q->properties.type)];
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) { if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
deallocate_hqd(dqm, q); deallocate_hqd(dqm, q);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
dqm->sdma_queue_count--;
deallocate_sdma_queue(dqm, q); deallocate_sdma_queue(dqm, q);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
dqm->xgmi_sdma_queue_count--;
deallocate_sdma_queue(dqm, q); deallocate_sdma_queue(dqm, q);
} else { else {
pr_debug("q->properties.type %d is invalid\n", pr_debug("q->properties.type %d is invalid\n",
q->properties.type); q->properties.type);
return -EINVAL; return -EINVAL;
@@ -494,7 +506,7 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
} }
qpd->queue_count--; qpd->queue_count--;
if (q->properties.is_active) if (q->properties.is_active)
dqm->queue_count--; decrement_queue_count(dqm, q->properties.type);
return retval; return retval;
} }
@@ -563,13 +575,13 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
/* /*
* check active state vs. the previous state and modify * check active state vs. the previous state and modify
* counter accordingly. map_queues_cpsch uses the * counter accordingly. map_queues_cpsch uses the
* dqm->queue_count to determine whether a new runlist must be * dqm->active_queue_count to determine whether a new runlist must be
* uploaded. * uploaded.
*/ */
if (q->properties.is_active && !prev_active) if (q->properties.is_active && !prev_active)
dqm->queue_count++; increment_queue_count(dqm, q->properties.type);
else if (!q->properties.is_active && prev_active) else if (!q->properties.is_active && prev_active)
dqm->queue_count--; decrement_queue_count(dqm, q->properties.type);
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
retval = map_queues_cpsch(dqm); retval = map_queues_cpsch(dqm);
@@ -618,7 +630,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)]; q->properties.type)];
q->properties.is_active = false; q->properties.is_active = false;
dqm->queue_count--; decrement_queue_count(dqm, q->properties.type);
if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n")) if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
continue; continue;
@@ -662,7 +674,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
continue; continue;
q->properties.is_active = false; q->properties.is_active = false;
dqm->queue_count--; decrement_queue_count(dqm, q->properties.type);
} }
retval = execute_queues_cpsch(dqm, retval = execute_queues_cpsch(dqm,
qpd->is_debug ? qpd->is_debug ?
@@ -731,7 +743,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)]; q->properties.type)];
q->properties.is_active = true; q->properties.is_active = true;
dqm->queue_count++; increment_queue_count(dqm, q->properties.type);
if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n")) if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
continue; continue;
@@ -786,7 +798,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
continue; continue;
q->properties.is_active = true; q->properties.is_active = true;
dqm->queue_count++; increment_queue_count(dqm, q->properties.type);
} }
retval = execute_queues_cpsch(dqm, retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -899,16 +911,15 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
mutex_init(&dqm->lock_hidden); mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues); INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->next_pipe_to_allocate = 0; dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
dqm->sdma_queue_count = 0; dqm->active_cp_queue_count = 0;
dqm->xgmi_sdma_queue_count = 0;
for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) { for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
int pipe_offset = pipe * get_queues_per_pipe(dqm); int pipe_offset = pipe * get_queues_per_pipe(dqm);
for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
if (test_bit(pipe_offset + queue, if (test_bit(pipe_offset + queue,
dqm->dev->shared_resources.queue_bitmap)) dqm->dev->shared_resources.cp_queue_bitmap))
dqm->allocated_queues[pipe] |= 1 << queue; dqm->allocated_queues[pipe] |= 1 << queue;
} }
@@ -924,7 +935,7 @@ static void uninitialize(struct device_queue_manager *dqm)
{ {
int i; int i;
WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0); WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
kfree(dqm->allocated_queues); kfree(dqm->allocated_queues);
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++) for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
@@ -966,8 +977,11 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
int bit; int bit;
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
if (dqm->sdma_bitmap == 0) if (dqm->sdma_bitmap == 0) {
pr_err("No more SDMA queue to allocate\n");
return -ENOMEM; return -ENOMEM;
}
bit = __ffs64(dqm->sdma_bitmap); bit = __ffs64(dqm->sdma_bitmap);
dqm->sdma_bitmap &= ~(1ULL << bit); dqm->sdma_bitmap &= ~(1ULL << bit);
q->sdma_id = bit; q->sdma_id = bit;
@@ -976,8 +990,10 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
q->properties.sdma_queue_id = q->sdma_id / q->properties.sdma_queue_id = q->sdma_id /
get_num_sdma_engines(dqm); get_num_sdma_engines(dqm);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
if (dqm->xgmi_sdma_bitmap == 0) if (dqm->xgmi_sdma_bitmap == 0) {
pr_err("No more XGMI SDMA queue to allocate\n");
return -ENOMEM; return -ENOMEM;
}
bit = __ffs64(dqm->xgmi_sdma_bitmap); bit = __ffs64(dqm->xgmi_sdma_bitmap);
dqm->xgmi_sdma_bitmap &= ~(1ULL << bit); dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
q->sdma_id = bit; q->sdma_id = bit;
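allocate_sdma_queue() above hands out SDMA queue ids from a 64-bit free bitmap: the lowest set bit is the next free queue, and clearing it marks the queue as taken. A tiny standalone illustration of the idiom (userspace C with made-up values; __builtin_ctzll plays the role of the kernel's __ffs64):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bitmap = 0xF0ULL;              /* queues 4..7 currently free */
	int bit = __builtin_ctzll(bitmap);      /* lowest free queue: 4 */

	bitmap &= ~(1ULL << bit);               /* allocate it */
	printf("allocated sdma_id %d, bitmap now 0x%llx\n",
	       bit, (unsigned long long)bitmap);
	return 0;
}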
@@ -1029,7 +1045,7 @@ static int set_sched_resources(struct device_queue_manager *dqm)
mec = (i / dqm->dev->shared_resources.num_queue_per_pipe) mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
/ dqm->dev->shared_resources.num_pipe_per_mec; / dqm->dev->shared_resources.num_pipe_per_mec;
if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap)) if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
continue; continue;
/* only acquire queues from the first MEC */ /* only acquire queues from the first MEC */
@@ -1064,9 +1080,9 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
mutex_init(&dqm->lock_hidden); mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues); INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->processes_count = 0; dqm->active_queue_count = dqm->processes_count = 0;
dqm->sdma_queue_count = 0; dqm->active_cp_queue_count = 0;
dqm->xgmi_sdma_queue_count = 0;
dqm->active_runlist = false; dqm->active_runlist = false;
dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm)); dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm)); dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
@@ -1158,7 +1174,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->total_queue_count); dqm->total_queue_count);
list_add(&kq->list, &qpd->priv_queue_list); list_add(&kq->list, &qpd->priv_queue_list);
dqm->queue_count++; increment_queue_count(dqm, kq->queue->properties.type);
qpd->is_debug = true; qpd->is_debug = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
dqm_unlock(dqm); dqm_unlock(dqm);
@@ -1172,7 +1188,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
{ {
dqm_lock(dqm); dqm_lock(dqm);
list_del(&kq->list); list_del(&kq->list);
dqm->queue_count--; decrement_queue_count(dqm, kq->queue->properties.type);
qpd->is_debug = false; qpd->is_debug = false;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
/* /*
@@ -1238,13 +1254,9 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
list_add(&q->list, &qpd->queues_list); list_add(&q->list, &qpd->queues_list);
qpd->queue_count++; qpd->queue_count++;
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
dqm->sdma_queue_count++;
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
dqm->xgmi_sdma_queue_count++;
if (q->properties.is_active) { if (q->properties.is_active) {
dqm->queue_count++; increment_queue_count(dqm, q->properties.type);
retval = execute_queues_cpsch(dqm, retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
} }
@@ -1298,20 +1310,6 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
return 0; return 0;
} }
static int unmap_sdma_queues(struct device_queue_manager *dqm)
{
int i, retval = 0;
for (i = 0; i < dqm->dev->device_info->num_sdma_engines +
dqm->dev->device_info->num_xgmi_sdma_engines; i++) {
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
if (retval)
return retval;
}
return retval;
}
/* dqm->lock mutex has to be locked before calling this function */ /* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm) static int map_queues_cpsch(struct device_queue_manager *dqm)
{ {
@@ -1319,7 +1317,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
if (!dqm->sched_running) if (!dqm->sched_running)
return 0; return 0;
if (dqm->queue_count <= 0 || dqm->processes_count <= 0) if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
return 0; return 0;
if (dqm->active_runlist) if (dqm->active_runlist)
return 0; return 0;
@@ -1349,12 +1347,6 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
if (!dqm->active_runlist) if (!dqm->active_runlist)
return retval; return retval;
pr_debug("Before destroying queues, sdma queue count is : %u, xgmi sdma queue count is : %u\n",
dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count);
if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count)
unmap_sdma_queues(dqm);
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE, retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
filter, filter_param, false, 0); filter, filter_param, false, 0);
if (retval) if (retval)
@@ -1427,18 +1419,15 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
deallocate_doorbell(qpd, q); deallocate_doorbell(qpd, q);
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
dqm->sdma_queue_count--;
deallocate_sdma_queue(dqm, q); deallocate_sdma_queue(dqm, q);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
dqm->xgmi_sdma_queue_count--;
deallocate_sdma_queue(dqm, q); deallocate_sdma_queue(dqm, q);
}
list_del(&q->list); list_del(&q->list);
qpd->queue_count--; qpd->queue_count--;
if (q->properties.is_active) { if (q->properties.is_active) {
dqm->queue_count--; decrement_queue_count(dqm, q->properties.type);
retval = execute_queues_cpsch(dqm, retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
if (retval == -ETIME) if (retval == -ETIME)
@@ -1648,7 +1637,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* Clean all kernel queues */ /* Clean all kernel queues */
list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) { list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
list_del(&kq->list); list_del(&kq->list);
dqm->queue_count--; decrement_queue_count(dqm, kq->queue->properties.type);
qpd->is_debug = false; qpd->is_debug = false;
dqm->total_queue_count--; dqm->total_queue_count--;
filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES; filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1656,16 +1645,13 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* Clear all user mode queues */ /* Clear all user mode queues */
list_for_each_entry(q, &qpd->queues_list, list) { list_for_each_entry(q, &qpd->queues_list, list) {
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
dqm->sdma_queue_count--;
deallocate_sdma_queue(dqm, q); deallocate_sdma_queue(dqm, q);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
dqm->xgmi_sdma_queue_count--;
deallocate_sdma_queue(dqm, q); deallocate_sdma_queue(dqm, q);
}
if (q->properties.is_active) if (q->properties.is_active)
dqm->queue_count--; decrement_queue_count(dqm, q->properties.type);
dqm->total_queue_count--; dqm->total_queue_count--;
} }
@@ -1742,8 +1728,7 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
struct kfd_dev *dev = dqm->dev; struct kfd_dev *dev = dqm->dev;
struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd; struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size * uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
(dev->device_info->num_sdma_engines + get_num_all_sdma_engines(dqm) *
dev->device_info->num_xgmi_sdma_engines) *
dev->device_info->num_sdma_queues_per_engine + dev->device_info->num_sdma_queues_per_engine +
dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
@@ -1979,7 +1964,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) { for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
if (!test_bit(pipe_offset + queue, if (!test_bit(pipe_offset + queue,
dqm->dev->shared_resources.queue_bitmap)) dqm->dev->shared_resources.cp_queue_bitmap))
continue; continue;
r = dqm->dev->kfd2kgd->hqd_dump( r = dqm->dev->kfd2kgd->hqd_dump(
@@ -1995,8 +1980,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
} }
} }
for (pipe = 0; pipe < get_num_sdma_engines(dqm) + for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
get_num_xgmi_sdma_engines(dqm); pipe++) {
for (queue = 0; for (queue = 0;
queue < dqm->dev->device_info->num_sdma_queues_per_engine; queue < dqm->dev->device_info->num_sdma_queues_per_engine;
queue++) { queue++) {
@@ -180,9 +180,8 @@ struct device_queue_manager {
struct list_head queues; struct list_head queues;
unsigned int saved_flags; unsigned int saved_flags;
unsigned int processes_count; unsigned int processes_count;
unsigned int queue_count; unsigned int active_queue_count;
unsigned int sdma_queue_count; unsigned int active_cp_queue_count;
unsigned int xgmi_sdma_queue_count;
unsigned int total_queue_count; unsigned int total_queue_count;
unsigned int next_pipe_to_allocate; unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues; unsigned int *allocated_queues;
@@ -219,7 +218,7 @@ void device_queue_manager_init_v10_navi10(
struct device_queue_manager_asic_ops *asic_ops); struct device_queue_manager_asic_ops *asic_ops);
void program_sh_mem_settings(struct device_queue_manager *dqm, void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd); struct qcm_process_device *qpd);
unsigned int get_queues_num(struct device_queue_manager *dqm); unsigned int get_cp_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm); unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm); unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm); unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
@@ -47,9 +47,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
struct kfd_dev *dev = pm->dqm->dev; struct kfd_dev *dev = pm->dqm->dev;
process_count = pm->dqm->processes_count; process_count = pm->dqm->processes_count;
queue_count = pm->dqm->queue_count; queue_count = pm->dqm->active_queue_count;
compute_queue_count = queue_count - pm->dqm->sdma_queue_count - compute_queue_count = pm->dqm->active_cp_queue_count;
pm->dqm->xgmi_sdma_queue_count;
/* check if there is over subscription /* check if there is over subscription
* Note: the arbitration between the number of VMIDs and * Note: the arbitration between the number of VMIDs and
@@ -62,7 +61,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
max_proc_per_quantum = dev->max_proc_per_quantum; max_proc_per_quantum = dev->max_proc_per_quantum;
if ((process_count > max_proc_per_quantum) || if ((process_count > max_proc_per_quantum) ||
compute_queue_count > get_queues_num(pm->dqm)) { compute_queue_count > get_cp_queues_num(pm->dqm)) {
*over_subscription = true; *over_subscription = true;
pr_debug("Over subscribed runlist\n"); pr_debug("Over subscribed runlist\n");
} }
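As an illustration of the check above (numbers purely illustrative): if get_cp_queues_num(pm->dqm) reports 24 usable CP queues, then a runlist with 25 active compute queues, or a process count above dev->max_proc_per_quantum, marks the runlist as oversubscribed.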
@@ -141,7 +140,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
pm->ib_size_bytes = alloc_size_bytes; pm->ib_size_bytes = alloc_size_bytes;
pr_debug("Building runlist ib process count: %d queues count %d\n", pr_debug("Building runlist ib process count: %d queues count %d\n",
pm->dqm->processes_count, pm->dqm->queue_count); pm->dqm->processes_count, pm->dqm->active_queue_count);
/* build the run list ib packet */ /* build the run list ib packet */
list_for_each_entry(cur, queues, list) { list_for_each_entry(cur, queues, list) {
@@ -41,6 +41,7 @@
#include <drm/drm_drv.h> #include <drm/drm_drv.h>
#include <drm/drm_device.h> #include <drm/drm_device.h>
#include <kgd_kfd_interface.h> #include <kgd_kfd_interface.h>
#include <linux/swap.h>
#include "amd_shared.h" #include "amd_shared.h"
@@ -294,6 +295,9 @@ struct kfd_dev {
/* xGMI */ /* xGMI */
uint64_t hive_id; uint64_t hive_id;
/* UUID */
uint64_t unique_id;
bool pci_atomic_requested; bool pci_atomic_requested;
/* SRAM ECC flag */ /* SRAM ECC flag */
@@ -502,6 +506,9 @@ struct queue {
struct kfd_process *process; struct kfd_process *process;
struct kfd_dev *device; struct kfd_dev *device;
void *gws; void *gws;
/* procfs */
struct kobject kobj;
}; };
/* /*
@@ -646,6 +653,7 @@ struct kfd_process_device {
* function. * function.
*/ */
bool already_dequeued; bool already_dequeued;
bool runtime_inuse;
/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */ /* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
enum kfd_pdd_bound bound; enum kfd_pdd_bound bound;
@@ -729,6 +737,7 @@ struct kfd_process {
/* Kobj for our procfs */ /* Kobj for our procfs */
struct kobject *kobj; struct kobject *kobj;
struct kobject *kobj_queues;
struct attribute attr_pasid; struct attribute attr_pasid;
}; };
@@ -835,6 +844,8 @@ extern struct device *kfd_device;
/* KFD's procfs */ /* KFD's procfs */
void kfd_procfs_init(void); void kfd_procfs_init(void);
void kfd_procfs_shutdown(void); void kfd_procfs_shutdown(void);
int kfd_procfs_add_queue(struct queue *q);
void kfd_procfs_del_queue(struct queue *q);
/* Topology */ /* Topology */
int kfd_topology_init(void); int kfd_topology_init(void);
@@ -31,6 +31,7 @@
#include <linux/compat.h> #include <linux/compat.h>
#include <linux/mman.h> #include <linux/mman.h>
#include <linux/file.h> #include <linux/file.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h" #include "amdgpu_amdkfd.h"
#include "amdgpu.h" #include "amdgpu.h"
@@ -132,6 +133,88 @@ void kfd_procfs_shutdown(void)
} }
} }
static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
struct attribute *attr, char *buffer)
{
struct queue *q = container_of(kobj, struct queue, kobj);
if (!strcmp(attr->name, "size"))
return snprintf(buffer, PAGE_SIZE, "%llu",
q->properties.queue_size);
else if (!strcmp(attr->name, "type"))
return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
else if (!strcmp(attr->name, "gpuid"))
return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
else
pr_err("Invalid attribute");
return 0;
}
static struct attribute attr_queue_size = {
.name = "size",
.mode = KFD_SYSFS_FILE_MODE
};
static struct attribute attr_queue_type = {
.name = "type",
.mode = KFD_SYSFS_FILE_MODE
};
static struct attribute attr_queue_gpuid = {
.name = "gpuid",
.mode = KFD_SYSFS_FILE_MODE
};
static struct attribute *procfs_queue_attrs[] = {
&attr_queue_size,
&attr_queue_type,
&attr_queue_gpuid,
NULL
};
static const struct sysfs_ops procfs_queue_ops = {
.show = kfd_procfs_queue_show,
};
static struct kobj_type procfs_queue_type = {
.sysfs_ops = &procfs_queue_ops,
.default_attrs = procfs_queue_attrs,
};
int kfd_procfs_add_queue(struct queue *q)
{
struct kfd_process *proc;
int ret;
if (!q || !q->process)
return -EINVAL;
proc = q->process;
/* Create proc/<pid>/queues/<queue id> folder */
if (!proc->kobj_queues)
return -EFAULT;
ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
proc->kobj_queues, "%u", q->properties.queue_id);
if (ret < 0) {
pr_warn("Creating proc/<pid>/queues/%u failed",
q->properties.queue_id);
kobject_put(&q->kobj);
return ret;
}
return 0;
}
void kfd_procfs_del_queue(struct queue *q)
{
if (!q)
return;
kobject_del(&q->kobj);
kobject_put(&q->kobj);
}
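Each user queue now shows up with size, type and gpuid attributes under the per-process KFD proc entry. A small userspace sketch that dumps them; the /sys/class/kfd/kfd/proc root and the pid/queue id used here are assumptions for illustration, not taken from the patch:

#include <stdio.h>

static void dump_queue_attr(const char *pid, const char *qid, const char *attr)
{
	char path[256], val[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/kfd/kfd/proc/%s/queues/%s/%s", pid, qid, attr);
	f = fopen(path, "r");
	if (!f)
		return;
	if (fgets(val, sizeof(val), f))
		printf("%s: %s\n", attr, val);
	fclose(f);
}

int main(void)
{
	/* hypothetical pid 1234, queue id 0 */
	dump_queue_attr("1234", "0", "size");
	dump_queue_attr("1234", "0", "type");
	dump_queue_attr("1234", "0", "gpuid");
	return 0;
}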
int kfd_process_create_wq(void) int kfd_process_create_wq(void)
{ {
if (!kfd_process_wq) if (!kfd_process_wq)
@@ -323,6 +406,11 @@ struct kfd_process *kfd_create_process(struct file *filep)
if (ret) if (ret)
pr_warn("Creating pasid for pid %d failed", pr_warn("Creating pasid for pid %d failed",
(int)process->lead_thread->pid); (int)process->lead_thread->pid);
process->kobj_queues = kobject_create_and_add("queues",
process->kobj);
if (!process->kobj_queues)
pr_warn("Creating KFD proc/queues folder failed");
} }
out: out:
if (!IS_ERR(process)) if (!IS_ERR(process))
@@ -440,6 +528,16 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
kfree(pdd->qpd.doorbell_bitmap); kfree(pdd->qpd.doorbell_bitmap);
idr_destroy(&pdd->alloc_idr); idr_destroy(&pdd->alloc_idr);
/*
* before destroying pdd, make sure to report availability
* for auto suspend
*/
if (pdd->runtime_inuse) {
pm_runtime_mark_last_busy(pdd->dev->ddev->dev);
pm_runtime_put_autosuspend(pdd->dev->ddev->dev);
pdd->runtime_inuse = false;
}
kfree(pdd); kfree(pdd);
} }
} }
@@ -457,6 +555,9 @@ static void kfd_process_wq_release(struct work_struct *work)
/* Remove the procfs files */ /* Remove the procfs files */
if (p->kobj) { if (p->kobj) {
sysfs_remove_file(p->kobj, &p->attr_pasid); sysfs_remove_file(p->kobj, &p->attr_pasid);
kobject_del(p->kobj_queues);
kobject_put(p->kobj_queues);
p->kobj_queues = NULL;
kobject_del(p->kobj); kobject_del(p->kobj);
kobject_put(p->kobj); kobject_put(p->kobj);
p->kobj = NULL; p->kobj = NULL;
@@ -754,6 +855,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
pdd->process = p; pdd->process = p;
pdd->bound = PDD_UNBOUND; pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false; pdd->already_dequeued = false;
pdd->runtime_inuse = false;
list_add(&pdd->per_device_list, &p->per_device_data); list_add(&pdd->per_device_list, &p->per_device_data);
/* Init idr used for memory handle translation */ /* Init idr used for memory handle translation */
@@ -843,15 +945,41 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
/*
* signal the runtime-pm system to auto-resume and prevent
* further runtime suspend once the device pdd is created, until
* the pdd is destroyed.
*/
if (!pdd->runtime_inuse) {
err = pm_runtime_get_sync(dev->ddev->dev);
if (err < 0)
return ERR_PTR(err);
}
err = kfd_iommu_bind_process_to_device(pdd); err = kfd_iommu_bind_process_to_device(pdd);
if (err) if (err)
return ERR_PTR(err); goto out;
err = kfd_process_device_init_vm(pdd, NULL); err = kfd_process_device_init_vm(pdd, NULL);
if (err) if (err)
return ERR_PTR(err); goto out;
/*
* make sure that runtime_usage counter is incremented just once
* per pdd
*/
pdd->runtime_inuse = true;
return pdd; return pdd;
out:
/* balance runpm reference count and exit with error */
if (!pdd->runtime_inuse) {
pm_runtime_mark_last_busy(dev->ddev->dev);
pm_runtime_put_autosuspend(dev->ddev->dev);
}
return ERR_PTR(err);
} }
struct kfd_process_device *kfd_get_first_process_device_data( struct kfd_process_device *kfd_get_first_process_device_data(
@@ -241,23 +241,18 @@ int pqm_create_queue(struct process_queue_manager *pqm,
switch (type) { switch (type) {
case KFD_QUEUE_TYPE_SDMA: case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI: case KFD_QUEUE_TYPE_SDMA_XGMI:
if ((type == KFD_QUEUE_TYPE_SDMA && dev->dqm->sdma_queue_count /* SDMA queues are always allocated statically no matter
>= get_num_sdma_queues(dev->dqm)) || * which scheduler mode is used. We also do not need to
(type == KFD_QUEUE_TYPE_SDMA_XGMI && * check whether a SDMA queue can be allocated here, because
dev->dqm->xgmi_sdma_queue_count * allocate_sdma_queue() in create_queue() has the
>= get_num_xgmi_sdma_queues(dev->dqm))) { * corresponding check logic.
pr_debug("Over-subscription is not allowed for SDMA.\n"); */
retval = -EPERM;
goto err_create_queue;
}
retval = init_user_queue(pqm, dev, &q, properties, f, *qid); retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0) if (retval != 0)
goto err_create_queue; goto err_create_queue;
pqn->q = q; pqn->q = q;
pqn->kq = NULL; pqn->kq = NULL;
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd); retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q); print_queue(q);
break; break;
@@ -266,7 +261,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if ((dev->dqm->sched_policy == if ((dev->dqm->sched_policy ==
KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) && KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) || ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
(dev->dqm->queue_count >= get_queues_num(dev->dqm)))) { (dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n"); pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
retval = -EPERM; retval = -EPERM;
goto err_create_queue; goto err_create_queue;
@@ -278,7 +273,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
pqn->q = q; pqn->q = q;
pqn->kq = NULL; pqn->kq = NULL;
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd); retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q); print_queue(q);
break; break;
case KFD_QUEUE_TYPE_DIQ: case KFD_QUEUE_TYPE_DIQ:
@@ -299,7 +293,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
} }
if (retval != 0) { if (retval != 0) {
pr_err("Pasid 0x%x DQM create queue %d failed. ret %d\n", pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
pqm->process->pasid, type, retval); pqm->process->pasid, type, retval);
goto err_create_queue; goto err_create_queue;
} }
@@ -322,12 +316,16 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if (q) { if (q) {
pr_debug("PQM done creating queue\n"); pr_debug("PQM done creating queue\n");
kfd_procfs_add_queue(q);
print_queue_properties(&q->properties); print_queue_properties(&q->properties);
} }
return retval; return retval;
err_create_queue: err_create_queue:
uninit_queue(q);
if (kq)
kernel_queue_uninit(kq, false);
kfree(pqn); kfree(pqn);
err_allocate_pqn: err_allocate_pqn:
/* check if queues list is empty unregister process from device */ /* check if queues list is empty unregister process from device */
@@ -378,6 +376,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
} }
if (pqn->q) { if (pqn->q) {
kfd_procfs_del_queue(pqn->q);
dqm = pqn->q->device->dqm; dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q); retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval) { if (retval) {
@@ -490,6 +490,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.num_sdma_queues_per_engine); dev->node_props.num_sdma_queues_per_engine);
sysfs_show_32bit_prop(buffer, "num_cp_queues", sysfs_show_32bit_prop(buffer, "num_cp_queues",
dev->node_props.num_cp_queues); dev->node_props.num_cp_queues);
sysfs_show_64bit_prop(buffer, "unique_id",
dev->node_props.unique_id);
if (dev->gpu) { if (dev->gpu) {
log_max_watch_addr = log_max_watch_addr =
@@ -1318,7 +1320,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.num_gws = (hws_gws_support && dev->node_props.num_gws = (hws_gws_support &&
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ? dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0; amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm); dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
dev->node_props.unique_id = gpu->unique_id;
kfd_fill_mem_clk_max_info(dev); kfd_fill_mem_clk_max_info(dev);
kfd_fill_iolink_non_crat_info(dev); kfd_fill_iolink_non_crat_info(dev);
@@ -54,6 +54,7 @@
struct kfd_node_properties { struct kfd_node_properties {
uint64_t hive_id; uint64_t hive_id;
uint64_t unique_id;
uint32_t cpu_cores_count; uint32_t cpu_cores_count;
uint32_t simd_count; uint32_t simd_count;
uint32_t mem_banks_count; uint32_t mem_banks_count;
@@ -98,6 +98,9 @@ MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
/* Number of bytes in PSP header for firmware. */ /* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100 #define PSP_HEADER_BYTES 0x100
@@ -801,10 +804,20 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes); fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
* amdgpu_ucode_init_single_fw will load dmub firmware
* fw_inst_const part to cw0; otherwise, the firmware back door load
* will be done by dm_dmub_hw_init
*/
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const, memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
fw_inst_const_size); fw_inst_const_size);
}
memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data, memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
fw_bss_data_size); fw_bss_data_size);
/* Copy firmware bios info into FB memory. */
memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios, memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
adev->bios_size); adev->bios_size);
@@ -823,6 +836,10 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
hw_params.fb_base = adev->gmc.fb_start; hw_params.fb_base = adev->gmc.fb_start;
hw_params.fb_offset = adev->gmc.aper_base; hw_params.fb_offset = adev->gmc.aper_base;
/* backdoor load firmware and trigger dmub running */
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
hw_params.load_inst_const = true;
if (dmcu) if (dmcu)
hw_params.psp_version = dmcu->psp_version; hw_params.psp_version = dmcu->psp_version;
@@ -960,7 +977,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
#ifdef CONFIG_DRM_AMD_DC_HDCP #ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->asic_type >= CHIP_RAVEN) { if (adev->asic_type >= CHIP_RAVEN) {
adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc); adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
if (!adev->dm.hdcp_workqueue) if (!adev->dm.hdcp_workqueue)
DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
@@ -991,11 +1008,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error; goto error;
} }
#if defined(CONFIG_DEBUG_FS)
if (dtn_debugfs_init(adev))
DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
#endif
DRM_DEBUG_DRIVER("KMS initialized.\n"); DRM_DEBUG_DRIVER("KMS initialized.\n");
return 0; return 0;
@@ -1079,9 +1091,11 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case CHIP_VEGA20: case CHIP_VEGA20:
case CHIP_NAVI10: case CHIP_NAVI10:
case CHIP_NAVI14: case CHIP_NAVI14:
case CHIP_NAVI12:
case CHIP_RENOIR: case CHIP_RENOIR:
return 0; return 0;
case CHIP_NAVI12:
fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
break;
case CHIP_RAVEN: case CHIP_RAVEN:
if (ASICREV_IS_PICASSO(adev->external_rev_id)) if (ASICREV_IS_PICASSO(adev->external_rev_id))
fw_name_dmcu = FIRMWARE_RAVEN_DMCU; fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
@@ -1192,22 +1206,21 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
return 0; return 0;
} }
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
DRM_WARN("Only PSP firmware loading is supported for DMUB\n");
return 0;
}
hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
AMDGPU_UCODE_ID_DMCUB; AMDGPU_UCODE_ID_DMCUB;
adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw; adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
adev->dm.dmub_fw;
adev->firmware.fw_size += adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
adev->dm.dmcub_fw_version); adev->dm.dmcub_fw_version);
}
adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
dmub_srv = adev->dm.dmub_srv; dmub_srv = adev->dm.dmub_srv;
@@ -1758,6 +1771,61 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
.atomic_commit_tail = amdgpu_dm_atomic_commit_tail .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
}; };
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
u32 max_cll, min_cll, max, min, q, r;
struct amdgpu_dm_backlight_caps *caps;
struct amdgpu_display_manager *dm;
struct drm_connector *conn_base;
struct amdgpu_device *adev;
static const u8 pre_computed_values[] = {
50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
if (!aconnector || !aconnector->dc_link)
return;
conn_base = &aconnector->base;
adev = conn_base->dev->dev_private;
dm = &adev->dm;
caps = &dm->backlight_caps;
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
caps->aux_support = false;
max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
if (caps->ext_caps->bits.oled == 1 ||
caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
caps->ext_caps->bits.hdr_aux_backlight_control == 1)
caps->aux_support = true;
/* From the specification (CTA-861-G), for calculating the maximum
* luminance we need to use:
* Luminance = 50*2**(CV/32)
* Where CV is a one-byte value.
* For calculating this expression we may need floating-point precision;
* to avoid this complexity, we take advantage of the fact that CV is divided
* by a constant. From Euclid's division algorithm, we know that CV
* can be written as: CV = 32*q + r. Next, we replace CV in the
* Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
* need to pre-compute the value of r/32. For pre-computing the values
* we just used the following Ruby line:
* (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
* The results of the above expressions can be verified at
* pre_computed_values.
*/
q = max_cll >> 5;
r = max_cll % 32;
max = (1 << q) * pre_computed_values[r];
// min luminance: maxLum * (CV/255)^2 / 100
q = DIV_ROUND_CLOSEST(min_cll, 255);
min = max * DIV_ROUND_CLOSEST((q * q), 100);
caps->aux_max_input_signal = max;
caps->aux_min_input_signal = min;
}
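A quick worked example of the CTA-861-G computation above (illustrative numbers): for max_cll = 70, q = 70 >> 5 = 2 and r = 70 % 32 = 6, so max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, which matches 50 * 2**(70/32) ≈ 227.8 nits once rounded.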
static void static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
{ {
@@ -1872,7 +1940,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
aconnector->edid); aconnector->edid);
} }
amdgpu_dm_update_freesync_caps(connector, aconnector->edid); amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
update_connector_ext_caps(aconnector);
} else { } else {
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
amdgpu_dm_update_freesync_caps(connector, NULL); amdgpu_dm_update_freesync_caps(connector, NULL);
@@ -1911,7 +1979,7 @@ static void handle_hpd_irq(void *param)
mutex_lock(&aconnector->hpd_lock); mutex_lock(&aconnector->hpd_lock);
#ifdef CONFIG_DRM_AMD_DC_HDCP #ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->asic_type >= CHIP_RAVEN) if (adev->dm.hdcp_workqueue)
hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif #endif
if (aconnector->fake_enable) if (aconnector->fake_enable)
@@ -2088,8 +2156,10 @@ static void handle_hpd_rx_irq(void *param)
} }
} }
#ifdef CONFIG_DRM_AMD_DC_HDCP #ifdef CONFIG_DRM_AMD_DC_HDCP
if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
if (adev->dm.hdcp_workqueue)
hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
}
#endif #endif
if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
(dc_link->type == dc_connection_mst_branch)) (dc_link->type == dc_connection_mst_branch))
@@ -2484,6 +2554,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
@@ -2498,9 +2569,11 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
amdgpu_acpi_get_backlight_caps(dm->adev, &caps); amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
if (caps.caps_valid) { if (caps.caps_valid) {
dm->backlight_caps.caps_valid = true;
if (caps.aux_support)
return;
dm->backlight_caps.min_input_signal = caps.min_input_signal; dm->backlight_caps.min_input_signal = caps.min_input_signal;
dm->backlight_caps.max_input_signal = caps.max_input_signal; dm->backlight_caps.max_input_signal = caps.max_input_signal;
dm->backlight_caps.caps_valid = true;
} else { } else {
dm->backlight_caps.min_input_signal = dm->backlight_caps.min_input_signal =
AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
@@ -2508,40 +2581,95 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
} }
#else #else
if (dm->backlight_caps.aux_support)
return;
dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif #endif
} }
static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
{
bool rc;
if (!link)
return 1;
rc = dc_link_set_backlight_level_nits(link, true, brightness,
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
return rc ? 0 : 1;
}
static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
const uint32_t user_brightness)
{
u32 min, max, conversion_pace;
u32 brightness = user_brightness;
if (!caps)
goto out;
if (!caps->aux_support) {
max = caps->max_input_signal;
min = caps->min_input_signal;
/*
* The brightness input is in the range 0-255
* It needs to be rescaled to be between the
* requested min and max input signal
* It also needs to be scaled up by 0x101 to
* match the DC interface which has a range of
* 0 to 0xffff
*/
conversion_pace = 0x101;
brightness =
user_brightness
* conversion_pace
* (max - min)
/ AMDGPU_MAX_BL_LEVEL
+ min * conversion_pace;
} else {
/* TODO
* We are doing a linear interpolation here, which is OK but
* does not provide the optimal result. We probably want
* something close to the Perceptual Quantizer (PQ) curve.
*/
max = caps->aux_max_input_signal;
min = caps->aux_min_input_signal;
brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
+ user_brightness * max;
// Multiply the value by 1000 since we use millinits
brightness *= 1000;
brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
}
out:
return brightness;
}
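For the non-AUX path above, a worked example with the ACPI defaults (min_input_signal = 12, max_input_signal = 255, AMDGPU_MAX_BL_LEVEL taken as 255): a requested brightness of 128 becomes 128 * 0x101 * (255 - 12) / 255 + 12 * 0x101 = 34431 on the 0 to 0xffff DC scale, i.e. roughly 53% (integer arithmetic, numbers illustrative).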
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{ {
struct amdgpu_display_manager *dm = bl_get_data(bd); struct amdgpu_display_manager *dm = bl_get_data(bd);
struct amdgpu_dm_backlight_caps caps; struct amdgpu_dm_backlight_caps caps;
uint32_t brightness = bd->props.brightness; struct dc_link *link = NULL;
u32 brightness;
bool rc;
amdgpu_dm_update_backlight_caps(dm); amdgpu_dm_update_backlight_caps(dm);
caps = dm->backlight_caps; caps = dm->backlight_caps;
/*
* The brightness input is in the range 0-255
* It needs to be rescaled to be between the
* requested min and max input signal
*
* It also needs to be scaled up by 0x101 to
* match the DC interface which has a range of
* 0 to 0xffff
*/
brightness =
brightness
* 0x101
* (caps.max_input_signal - caps.min_input_signal)
/ AMDGPU_MAX_BL_LEVEL
+ caps.min_input_signal * 0x101;
if (dc_link_set_backlight_level(dm->backlight_link, link = (struct dc_link *)dm->backlight_link;
brightness, 0))
return 0; brightness = convert_brightness(&caps, bd->props.brightness);
else // Change brightness based on AUX property
return 1; if (caps.aux_support)
return set_backlight_via_aux(link, brightness);
rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
return rc ? 0 : 1;
} }
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
@@ -4493,6 +4621,19 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
return &new_state->base; return &new_state->base;
} }
static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
#if defined(CONFIG_DEBUG_FS)
connector_debugfs_init(amdgpu_dm_connector);
#endif
return 0;
}
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
.reset = amdgpu_dm_connector_funcs_reset, .reset = amdgpu_dm_connector_funcs_reset,
.detect = amdgpu_dm_connector_detect, .detect = amdgpu_dm_connector_detect,
@@ -4502,6 +4643,7 @@ static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_set_property = amdgpu_dm_connector_atomic_set_property, .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
.atomic_get_property = amdgpu_dm_connector_atomic_get_property, .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
.late_register = amdgpu_dm_connector_late_register,
.early_unregister = amdgpu_dm_connector_unregister .early_unregister = amdgpu_dm_connector_unregister
}; };
@@ -5705,7 +5847,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
drm_connector_attach_vrr_capable_property( drm_connector_attach_vrr_capable_property(
&aconnector->base); &aconnector->base);
#ifdef CONFIG_DRM_AMD_DC_HDCP #ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->asic_type >= CHIP_RAVEN) if (adev->dm.hdcp_workqueue)
drm_connector_attach_content_protection_property(&aconnector->base, true); drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif #endif
} }
@@ -5842,13 +5984,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
drm_connector_attach_encoder( drm_connector_attach_encoder(
&aconnector->base, &aencoder->base); &aconnector->base, &aencoder->base);
drm_connector_register(&aconnector->base);
#if defined(CONFIG_DEBUG_FS)
connector_debugfs_init(aconnector);
aconnector->debugfs_dpcd_address = 0;
aconnector->debugfs_dpcd_size = 0;
#endif
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
|| connector_type == DRM_MODE_CONNECTOR_eDP) || connector_type == DRM_MODE_CONNECTOR_eDP)
amdgpu_dm_initialize_dp_connector(dm, aconnector); amdgpu_dm_initialize_dp_connector(dm, aconnector);
@@ -90,15 +90,41 @@ struct dm_comressor_info {
}; };
/** /**
* struct amdgpu_dm_backlight_caps - Usable range of backlight values from ACPI * struct amdgpu_dm_backlight_caps - Information about backlight
* @min_input_signal: minimum possible input in range 0-255 *
* @max_input_signal: maximum possible input in range 0-255 * Describe the backlight support for ACPI or eDP AUX.
* @caps_valid: true if these values are from the ACPI interface
*/ */
struct amdgpu_dm_backlight_caps { struct amdgpu_dm_backlight_caps {
/**
* @ext_caps: Keep the data struct with all the information about the
* display support for HDR.
*/
union dpcd_sink_ext_caps *ext_caps;
/**
* @aux_min_input_signal: Min brightness value supported by the display
*/
u32 aux_min_input_signal;
/**
* @aux_max_input_signal: Max brightness value supported by the display
* in nits.
*/
u32 aux_max_input_signal;
/**
* @min_input_signal: minimum possible input in range 0-255.
*/
int min_input_signal; int min_input_signal;
/**
* @max_input_signal: maximum possible input in range 0-255.
*/
int max_input_signal; int max_input_signal;
/**
* @caps_valid: true if these values are from the ACPI interface.
*/
bool caps_valid; bool caps_valid;
/**
* @aux_support: Describes if the display supports AUX backlight.
*/
bool aux_support;
}; };
/** /**
@@ -32,6 +32,19 @@
#include "amdgpu_dm.h" #include "amdgpu_dm.h"
#include "amdgpu_dm_debugfs.h" #include "amdgpu_dm_debugfs.h"
#include "dm_helpers.h" #include "dm_helpers.h"
#include "dmub/inc/dmub_srv.h"
struct dmub_debugfs_trace_header {
uint32_t entry_count;
uint32_t reserved[3];
};
struct dmub_debugfs_trace_entry {
uint32_t trace_code;
uint32_t tick_count;
uint32_t param0;
uint32_t param1;
};
/* function description /* function description
* get/ set DP configuration: lane_count, link_rate, spread_spectrum * get/ set DP configuration: lane_count, link_rate, spread_spectrum
@@ -675,6 +688,73 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
return bytes_from_user; return bytes_from_user;
} }
/**
* Returns the DMCUB tracebuffer contents.
* Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_tracebuffer
*/
static int dmub_tracebuffer_show(struct seq_file *m, void *data)
{
struct amdgpu_device *adev = m->private;
struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
struct dmub_debugfs_trace_entry *entries;
uint8_t *tbuf_base;
uint32_t tbuf_size, max_entries, num_entries, i;
if (!fb_info)
return 0;
tbuf_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr;
if (!tbuf_base)
return 0;
tbuf_size = fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size;
max_entries = (tbuf_size - sizeof(struct dmub_debugfs_trace_header)) /
sizeof(struct dmub_debugfs_trace_entry);
num_entries =
((struct dmub_debugfs_trace_header *)tbuf_base)->entry_count;
num_entries = min(num_entries, max_entries);
entries = (struct dmub_debugfs_trace_entry
*)(tbuf_base +
sizeof(struct dmub_debugfs_trace_header));
for (i = 0; i < num_entries; ++i) {
struct dmub_debugfs_trace_entry *entry = &entries[i];
seq_printf(m,
"trace_code=%u tick_count=%u param0=%u param1=%u\n",
entry->trace_code, entry->tick_count, entry->param0,
entry->param1);
}
return 0;
}
/**
* Returns the DMCUB firmware state contents.
* Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_fw_state
*/
static int dmub_fw_state_show(struct seq_file *m, void *data)
{
struct amdgpu_device *adev = m->private;
struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
uint8_t *state_base;
uint32_t state_size;
if (!fb_info)
return 0;
state_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr;
if (!state_base)
return 0;
state_size = fb_info->fb[DMUB_WINDOW_6_FW_STATE].size;
return seq_write(m, state_base, state_size);
}
/* /*
* Returns the current and maximum output bpc for the connector. * Returns the current and maximum output bpc for the connector.
* Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc * Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
@@ -880,6 +960,8 @@ static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
return read_size - r; return read_size - r;
} }
DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
DEFINE_SHOW_ATTRIBUTE(output_bpc); DEFINE_SHOW_ATTRIBUTE(output_bpc);
DEFINE_SHOW_ATTRIBUTE(vrr_range); DEFINE_SHOW_ATTRIBUTE(vrr_range);
@@ -1008,6 +1090,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector, debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
&force_yuv420_output_fops); &force_yuv420_output_fops);
connector->debugfs_dpcd_address = 0;
connector->debugfs_dpcd_size = 0;
} }
/* /*
@@ -1188,5 +1273,11 @@ int dtn_debugfs_init(struct amdgpu_device *adev)
debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev, debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev,
&visual_confirm_fops); &visual_confirm_fops);
debugfs_create_file_unsafe("amdgpu_dm_dmub_tracebuffer", 0644, root,
adev, &dmub_tracebuffer_fops);
debugfs_create_file_unsafe("amdgpu_dm_dmub_fw_state", 0644, root,
adev, &dmub_fw_state_fops);
return 0; return 0;
} }
@@ -28,6 +28,13 @@
#include "amdgpu_dm.h" #include "amdgpu_dm.h"
#include "dm_helpers.h" #include "dm_helpers.h"
#include <drm/drm_hdcp.h> #include <drm/drm_hdcp.h>
#include "hdcp_psp.h"
/*
* If the SRM version being loaded is less than or equal to the
* currently loaded SRM, psp will return 0xFFFF as the version
*/
#define PSP_SRM_VERSION_MAX 0xFFFF
static bool static bool
lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size) lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
@@ -67,6 +74,59 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size); return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
} }
static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
{
struct ta_hdcp_shared_memory *hdcp_cmd;
if (!psp->hdcp_context.hdcp_initialized) {
DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
return NULL;
}
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_GET_SRM;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
return NULL;
*srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version;
*srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size;
return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf;
}
static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, uint32_t *srm_version)
{
struct ta_hdcp_shared_memory *hdcp_cmd;
if (!psp->hdcp_context.hdcp_initialized) {
DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
return -EINVAL;
}
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
memcpy(hdcp_cmd->in_msg.hdcp_set_srm.srm_buf, srm, srm_size);
hdcp_cmd->in_msg.hdcp_set_srm.srm_buf_size = srm_size;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_SET_SRM;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 ||
hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX)
return -EINVAL;
*srm_version = hdcp_cmd->out_msg.hdcp_set_srm.srm_version;
return 0;
}
static void process_output(struct hdcp_workqueue *hdcp_work) static void process_output(struct hdcp_workqueue *hdcp_work)
{ {
struct mod_hdcp_output output = hdcp_work->output; struct mod_hdcp_output output = hdcp_work->output;
@@ -88,6 +148,18 @@ static void process_output(struct hdcp_workqueue *hdcp_work)
schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0)); schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0));
} }
static void link_lock(struct hdcp_workqueue *work, bool lock)
{
int i = 0;
for (i = 0; i < work->max_link; i++) {
if (lock)
mutex_lock(&work[i].mutex);
else
mutex_unlock(&work[i].mutex);
}
}
void hdcp_update_display(struct hdcp_workqueue *hdcp_work, void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
unsigned int link_index, unsigned int link_index,
struct amdgpu_dm_connector *aconnector, struct amdgpu_dm_connector *aconnector,
@@ -112,6 +184,13 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
if (enable_encryption) { if (enable_encryption) {
/* Explicitly set the saved SRM as sysfs call will be after we already enabled hdcp
* (s3 resume case)
*/
if (hdcp_work->srm_size > 0)
psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size,
&hdcp_work->srm_version);
display->adjust.disable = 0; display->adjust.disable = 0;
if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0)
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
@@ -301,8 +380,9 @@ void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork); cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
} }
kfree(hdcp_work->srm);
kfree(hdcp_work->srm_temp);
kfree(hdcp_work); kfree(hdcp_work);
} }
static void update_config(void *handle, struct cp_psp_stream_config *config) static void update_config(void *handle, struct cp_psp_stream_config *config)
@@ -338,20 +418,163 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false); hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false);
} }
struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc)
/* NOTE: From usermode's perspective you only need to call write *ONCE*; the kernel
* will automatically call once or twice depending on the size
*
* call: "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode no matter what the size is
*
* The kernel can only send PAGE_SIZE at once and since MAX_SRM_FILE(5120) > PAGE_SIZE(4096),
* srm_data_write can be called multiple times.
*
* sysfs interface doesn't tell us the size we will get so we are sending partial SRMs to psp and on
* the last call we will send the full SRM. PSP will fail on every call before the last.
*
* This means we don't know if the SRM is good until the last call. And because of this limitation we
* cannot throw errors early as it will stop the kernel from writing to sysfs
*
* Example 1:
* Good SRM size = 5096
* first call to write 4096 -> PSP fails
* Second call to write 1000 -> PSP Pass -> SRM is set
*
* Example 2:
* Bad SRM size = 4096
* first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this
* is the last call)
*
* Solution?:
* 1: Parse the SRM? -> It is signed so we don't know the EOF
* 2: We can have another sysfs that passes the size before calling set. -> simpler solution
* below
*
* Easy Solution:
* Always call get after Set to verify if set was successful.
* +----------------------+
* | Why it works: |
* +----------------------+
* PSP will only update its SRM if the one it holds is older than the one we are trying to load.
* Always do set first, then get.
* -if we try to "1. SET" an older version PSP will reject it and we can "2. GET" the newer
* version and save it
*
* -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the
* same(newer) version back and save it
*
* -if we try to "1. SET" a newer version and PSP rejects it. That means the format is
* incorrect/corrupted and we should correct our SRM by getting it from PSP
*/
static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct hdcp_workqueue *work;
uint32_t srm_version = 0;
work = container_of(bin_attr, struct hdcp_workqueue, attr);
link_lock(work, true);
memcpy(work->srm_temp + pos, buffer, count);
if (!psp_set_srm(work->hdcp.config.psp.handle, work->srm_temp, pos + count, &srm_version)) {
DRM_DEBUG_DRIVER("HDCP SRM SET version 0x%X", srm_version);
memcpy(work->srm, work->srm_temp, pos + count);
work->srm_size = pos + count;
work->srm_version = srm_version;
}
link_lock(work, false);
return count;
}
static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct hdcp_workqueue *work;
uint8_t *srm = NULL;
uint32_t srm_version;
uint32_t srm_size;
size_t ret = count;
work = container_of(bin_attr, struct hdcp_workqueue, attr);
link_lock(work, true);
srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size);
if (!srm) {
link_lock(work, false);
return -EINVAL;
}
if (pos >= srm_size)
ret = 0;
if (srm_size - pos < count) {
memcpy(buffer, srm + pos, srm_size - pos);
ret = srm_size - pos;
goto ret;
}
memcpy(buffer, srm + pos, count);
ret:
link_lock(work, false);
return ret;
}
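
For illustration, here is a minimal usermode sketch of the "set then get" verification described in the comment above srm_data_write(). The sysfs path matches the node created below; srm_buf/srm_len are assumed to hold an SRM blob already loaded from storage, and the helper name is hypothetical — it is not part of this patch.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int srm_set_and_verify(const unsigned char *srm_buf, size_t srm_len)
{
	unsigned char readback[5120];	/* assumed PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE (5120 per the comment above) */
	size_t off = 0, got = 0;
	ssize_t n;
	int fd = open("/sys/class/drm/card0/device/hdcp_srm", O_RDWR);

	if (fd < 0)
		return -1;

	/* 1. SET: loop until the whole blob is written; the kernel may split
	 * this into PAGE_SIZE chunks and call srm_data_write() for each.
	 */
	while (off < srm_len) {
		n = write(fd, srm_buf + off, srm_len - off);
		if (n <= 0)
			goto fail;
		off += n;
	}

	/* 2. GET: read back what PSP now holds. If it differs from what we
	 * wrote, PSP rejected our (older or corrupted) SRM and kept its own.
	 */
	if (lseek(fd, 0, SEEK_SET) < 0)
		goto fail;
	while ((n = read(fd, readback + got, sizeof(readback) - got)) > 0)
		got += n;
	if (n < 0 || got != srm_len || memcmp(readback, srm_buf, srm_len))
		goto fail;

	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}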
/* From the hdcp spec (5.Renewability) SRM needs to be stored in a non-volatile memory.
*
* For example,
* if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B"
* needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent
* across boot/reboots/suspend/resume/shutdown
*
* Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP we need
* to make the SRM persistent.
*
* -PSP owns the checking of SRM but doesn't have the ability to store it in a non-volatile memory.
* -The kernel cannot write to the filesystem.
* -So we need usermode to do this for us, which is why an interface for usermode is needed
*
*
*
* Usermode can read/write to/from PSP using the sysfs interface
* For example:
* to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile
* to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm
*/
static const struct bin_attribute data_attr = {
.attr = {.name = "hdcp_srm", .mode = 0664},
.size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */
.write = srm_data_write,
.read = srm_data_read,
};
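
As a companion to the sketch above, here is a hypothetical usermode helper for the persistence flow just described (the "cat /sys/class/drm/card0/device/hdcp_srm > srmfile" direction); a boot/shutdown service could pair it with srm_set_and_verify() to restore the SRM after a reboot. Again, this is only an illustration and not part of the patch.

#include <fcntl.h>
#include <unistd.h>

static int srm_save_to_storage(const char *storage_path)
{
	unsigned char buf[5120];	/* assumed PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE */
	ssize_t n, total = 0;
	int ret = -1;
	int dst;
	int src = open("/sys/class/drm/card0/device/hdcp_srm", O_RDONLY);

	if (src < 0)
		return -1;
	dst = open(storage_path, O_WRONLY | O_CREAT | O_TRUNC, 0600);
	if (dst < 0) {
		close(src);
		return -1;
	}
	/* Copy whatever SRM PSP currently holds out to non-volatile storage. */
	while ((n = read(src, buf, sizeof(buf))) > 0) {
		if (write(dst, buf, n) != n)
			goto out;
		total += n;
	}
	if (n == 0 && total > 0)
		ret = 0;
out:
	close(src);
	close(dst);
	return ret;
}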
struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc)
{ {
int max_caps = dc->caps.max_links; int max_caps = dc->caps.max_links;
struct hdcp_workqueue *hdcp_work = kzalloc(max_caps*sizeof(*hdcp_work), GFP_KERNEL); struct hdcp_workqueue *hdcp_work;
int i = 0; int i = 0;
hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL);
if (hdcp_work == NULL) if (hdcp_work == NULL)
return NULL;
hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL);
if (hdcp_work->srm == NULL)
goto fail_alloc_context;
hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm_temp), GFP_KERNEL);
if (hdcp_work->srm_temp == NULL)
goto fail_alloc_context; goto fail_alloc_context;
hdcp_work->max_link = max_caps; hdcp_work->max_link = max_caps;
for (i = 0; i < max_caps; i++) { for (i = 0; i < max_caps; i++) {
mutex_init(&hdcp_work[i].mutex); mutex_init(&hdcp_work[i].mutex);
INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq); INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq);
@@ -360,7 +583,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *c
INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer); INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate); INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
hdcp_work[i].hdcp.config.psp.handle = psp_context; hdcp_work[i].hdcp.config.psp.handle = &adev->psp;
hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i); hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c; hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c; hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
@@ -371,9 +594,17 @@ struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *c
cp_psp->funcs.update_stream_config = update_config; cp_psp->funcs.update_stream_config = update_config;
cp_psp->handle = hdcp_work; cp_psp->handle = hdcp_work;
/* File created at /sys/class/drm/card0/device/hdcp_srm*/
hdcp_work[0].attr = data_attr;
if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
DRM_WARN("Failed to create device file hdcp_srm");
return hdcp_work; return hdcp_work;
fail_alloc_context: fail_alloc_context:
kfree(hdcp_work->srm);
kfree(hdcp_work->srm_temp);
kfree(hdcp_work); kfree(hdcp_work);
return NULL; return NULL;

View File

@@ -30,6 +30,7 @@
#include "hdcp.h" #include "hdcp.h"
#include "dc.h" #include "dc.h"
#include "dm_cp_psp.h" #include "dm_cp_psp.h"
#include "amdgpu.h"
struct mod_hdcp; struct mod_hdcp;
struct mod_hdcp_link; struct mod_hdcp_link;
@@ -52,6 +53,12 @@ struct hdcp_workqueue {
enum mod_hdcp_encryption_status encryption_status; enum mod_hdcp_encryption_status encryption_status;
uint8_t max_link; uint8_t max_link;
uint8_t *srm;
uint8_t *srm_temp;
uint32_t srm_version;
uint32_t srm_size;
struct bin_attribute attr;
}; };
void hdcp_update_display(struct hdcp_workqueue *hdcp_work, void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
@@ -64,6 +71,6 @@ void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index); void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
void hdcp_destroy(struct hdcp_workqueue *work); void hdcp_destroy(struct hdcp_workqueue *work);
struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc); struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc);
#endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */ #endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */

View File

@@ -154,15 +154,18 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
{ {
struct amdgpu_dm_connector *amdgpu_dm_connector = struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector); to_amdgpu_dm_connector(connector);
struct drm_dp_mst_port *port = amdgpu_dm_connector->port; int r;
amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
if (r)
return r;
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
connector_debugfs_init(amdgpu_dm_connector); connector_debugfs_init(amdgpu_dm_connector);
amdgpu_dm_connector->debugfs_dpcd_address = 0;
amdgpu_dm_connector->debugfs_dpcd_size = 0;
#endif #endif
return drm_dp_mst_connector_late_register(connector, port); return r;
} }
static void static void
@@ -482,11 +485,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector) struct amdgpu_dm_connector *aconnector)
{ {
aconnector->dm_dp_aux.aux.name = "dmdc"; aconnector->dm_dp_aux.aux.name = "dmdc";
aconnector->dm_dp_aux.aux.dev = aconnector->base.kdev;
aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer; aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc; aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
drm_dp_aux_register(&aconnector->dm_dp_aux.aux); drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux, drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
&aconnector->base); &aconnector->base);

View File

@@ -221,8 +221,8 @@ static void init_transmitter_control(struct bios_parser *bp)
uint8_t frev; uint8_t frev;
uint8_t crev; uint8_t crev;
if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) == false) BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev);
BREAK_TO_DEBUGGER();
switch (crev) { switch (crev) {
case 6: case 6:
bp->cmd_tbl.transmitter_control = transmitter_control_v1_6; bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;

View File

@@ -53,25 +53,18 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCE_VERSION_11_2: case DCE_VERSION_11_2:
case DCE_VERSION_11_22: case DCE_VERSION_11_22:
case DCE_VERSION_12_0:
case DCE_VERSION_12_1:
*h = dal_cmd_tbl_helper_dce112_get_table2(); *h = dal_cmd_tbl_helper_dce112_get_table2();
return true; return true;
#if defined(CONFIG_DRM_AMD_DC_DCN) #if defined(CONFIG_DRM_AMD_DC_DCN)
case DCN_VERSION_1_0: case DCN_VERSION_1_0:
case DCN_VERSION_1_01: case DCN_VERSION_1_01:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
#endif
case DCN_VERSION_2_0: case DCN_VERSION_2_0:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
case DCN_VERSION_2_1: case DCN_VERSION_2_1:
*h = dal_cmd_tbl_helper_dce112_get_table2(); *h = dal_cmd_tbl_helper_dce112_get_table2();
return true; return true;
case DCE_VERSION_12_0: #endif
case DCE_VERSION_12_1:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
default: default:
/* Unsupported DCE */ /* Unsupported DCE */

View File

@@ -703,11 +703,19 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
} }
unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev) unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev, uint32_t pci_revision_id)
{ {
/* for dali & pollock, the highest voltage level we want is 0 */ /* for low power RV2 variants, the highest voltage level we want is 0 */
if (ASICREV_IS_POLLOCK(hw_internal_rev) || ASICREV_IS_DALI(hw_internal_rev)) if (ASICREV_IS_RAVEN2(hw_internal_rev))
switch (pci_revision_id) {
case PRID_DALI_DE:
case PRID_DALI_DF:
case PRID_DALI_E3:
case PRID_DALI_E4:
return 0; return 0;
default:
break;
}
/* we are ok with all levels */ /* we are ok with all levels */
return 4; return 4;
@@ -1277,7 +1285,9 @@ bool dcn_validate_bandwidth(
PERFORMANCE_TRACE_END(); PERFORMANCE_TRACE_END();
BW_VAL_TRACE_FINISH(); BW_VAL_TRACE_FINISH();
if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(dc->ctx->asic_id.hw_internal_rev)) if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(
dc->ctx->asic_id.hw_internal_rev,
dc->ctx->asic_id.pci_revision_id))
return true; return true;
else else
return false; return false;

View File

@@ -63,6 +63,25 @@ int clk_mgr_helper_get_active_display_cnt(
return display_count; return display_count;
} }
int clk_mgr_helper_get_active_plane_cnt(
struct dc *dc,
struct dc_state *context)
{
int i, total_plane_count;
total_plane_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_status stream_status = context->stream_status[i];
/*
* Sum up plane_count for all streams ( active and virtual ).
*/
total_plane_count += stream_status.plane_count;
}
return total_plane_count;
}
void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr) void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
{ {
struct dc_link *edp_link = get_edp_link(dc); struct dc_link *edp_link = get_edp_link(dc);
@@ -134,13 +153,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
#if defined(CONFIG_DRM_AMD_DC_DCN) #if defined(CONFIG_DRM_AMD_DC_DCN)
case FAMILY_RV: case FAMILY_RV:
if (ASICREV_IS_DALI(asic_id.hw_internal_rev) ||
ASICREV_IS_POLLOCK(asic_id.hw_internal_rev)) {
/* TEMP: this check has to come before ASICREV_IS_RENOIR */
/* which also incorrectly returns true for Dali/Pollock*/
rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
break;
}
if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) { if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {
rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
break; break;

View File

@@ -158,6 +158,8 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
bool dpp_clock_lowered = false; bool dpp_clock_lowered = false;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false; bool force_reset = false;
bool p_state_change_support;
int total_plane_count;
if (dc->work_arounds.skip_clock_update) if (dc->work_arounds.skip_clock_update)
return; return;
@@ -213,9 +215,11 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
pp_smu->set_hard_min_socclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.socclk_khz / 1000); pp_smu->set_hard_min_socclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.socclk_khz / 1000);
} }
if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support; clk_mgr_base->clks.p_state_change_support = p_state_change_support;
if (pp_smu && pp_smu->set_pstate_handshake_support) if (pp_smu && pp_smu->set_pstate_handshake_support)
pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support); pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support);
} }

View File

@@ -405,7 +405,7 @@ void rn_init_clocks(struct clk_mgr *clk_mgr)
clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
} }
void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges) static void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
{ {
int i, num_valid_sets; int i, num_valid_sets;
@@ -465,16 +465,15 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra
static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base) static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{ {
struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug; struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug;
struct pp_smu_wm_range_sets ranges = {0};
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu; struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu;
if (!debug->disable_pplib_wm_range) { if (!debug->disable_pplib_wm_range) {
build_watermark_ranges(clk_mgr_base->bw_params, &ranges); build_watermark_ranges(clk_mgr_base->bw_params, &clk_mgr_base->ranges);
/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
if (pp_smu && pp_smu->rn_funcs.set_wm_ranges) if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges); pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &clk_mgr_base->ranges);
} }
} }
@@ -504,7 +503,7 @@ static struct clk_mgr_funcs dcn21_funcs = {
.notify_wm_ranges = rn_notify_wm_ranges .notify_wm_ranges = rn_notify_wm_ranges
}; };
struct clk_bw_params rn_bw_params = { static struct clk_bw_params rn_bw_params = {
.vram_type = Ddr4MemType, .vram_type = Ddr4MemType,
.num_channels = 1, .num_channels = 1,
.clk_table = { .clk_table = {
@@ -544,7 +543,7 @@ struct clk_bw_params rn_bw_params = {
}; };
struct wm_table ddr4_wm_table = { static struct wm_table ddr4_wm_table = {
.entries = { .entries = {
{ {
.wm_inst = WM_A, .wm_inst = WM_A,
@@ -581,7 +580,7 @@ struct wm_table ddr4_wm_table = {
} }
}; };
struct wm_table lpddr4_wm_table = { static struct wm_table lpddr4_wm_table = {
.entries = { .entries = {
{ {
.wm_inst = WM_A, .wm_inst = WM_A,

View File

@@ -701,7 +701,7 @@ static bool dc_construct(struct dc *dc,
dc_ctx->created_bios = true; dc_ctx->created_bios = true;
} }
dc->vendor_signature = init_params->vendor_signature;
/* Create GPIO service */ /* Create GPIO service */
dc_ctx->gpio_service = dal_gpio_service_create( dc_ctx->gpio_service = dal_gpio_service_create(
@@ -761,6 +761,28 @@ static bool disable_all_writeback_pipes_for_stream(
return true; return true;
} }
void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool lock)
{
int i = 0;
/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
if (dc->hwss.interdependent_update_lock)
dc->hwss.interdependent_update_lock(dc, context, lock);
else {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
// Copied conditions that were previously in dce110_apply_ctx_for_surface
if (stream == pipe_ctx->stream) {
if (!pipe_ctx->top_pipe &&
(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
}
}
}
}
static void disable_dangling_plane(struct dc *dc, struct dc_state *context) static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{ {
int i, j; int i, j;
@@ -786,11 +808,20 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
if (should_disable && old_stream) { if (should_disable && old_stream) {
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
if (dc->hwss.apply_ctx_for_surface)
if (dc->hwss.apply_ctx_for_surface) {
apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
dc->hwss.post_unlock_program_front_end(dc, dangling_context);
} }
if (dc->hwss.program_front_end_for_ctx) if (dc->hwss.program_front_end_for_ctx) {
dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
dc->hwss.program_front_end_for_ctx(dc, dangling_context); dc->hwss.program_front_end_for_ctx(dc, dangling_context);
dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
dc->hwss.post_unlock_program_front_end(dc, dangling_context);
}
}
} }
current_ctx = dc->current_state; current_ctx = dc->current_state;
@@ -1210,15 +1241,18 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
/* re-program planes for existing stream, in case we need to /* re-program planes for existing stream, in case we need to
* free up plane resource for later use * free up plane resource for later use
*/ */
if (dc->hwss.apply_ctx_for_surface) if (dc->hwss.apply_ctx_for_surface) {
for (i = 0; i < context->stream_count; i++) { for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->mode_changed) if (context->streams[i]->mode_changed)
continue; continue;
apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
dc->hwss.apply_ctx_for_surface( dc->hwss.apply_ctx_for_surface(
dc, context->streams[i], dc, context->streams[i],
context->stream_status[i].plane_count, context->stream_status[i].plane_count,
context); /* use new pipe config in new context */ context); /* use new pipe config in new context */
apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
dc->hwss.post_unlock_program_front_end(dc, context);
}
} }
/* Program hardware */ /* Program hardware */
@@ -1238,19 +1272,27 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
} }
/* Program all planes within new context*/ /* Program all planes within new context*/
if (dc->hwss.program_front_end_for_ctx) if (dc->hwss.program_front_end_for_ctx) {
dc->hwss.interdependent_update_lock(dc, context, true);
dc->hwss.program_front_end_for_ctx(dc, context); dc->hwss.program_front_end_for_ctx(dc, context);
dc->hwss.interdependent_update_lock(dc, context, false);
dc->hwss.post_unlock_program_front_end(dc, context);
}
for (i = 0; i < context->stream_count; i++) { for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link; const struct dc_link *link = context->streams[i]->link;
if (!context->streams[i]->mode_changed) if (!context->streams[i]->mode_changed)
continue; continue;
if (dc->hwss.apply_ctx_for_surface) if (dc->hwss.apply_ctx_for_surface) {
apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
dc->hwss.apply_ctx_for_surface( dc->hwss.apply_ctx_for_surface(
dc, context->streams[i], dc, context->streams[i],
context->stream_status[i].plane_count, context->stream_status[i].plane_count,
context); context);
apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
dc->hwss.post_unlock_program_front_end(dc, context);
}
/* /*
* enable stereo * enable stereo
@@ -1318,18 +1360,12 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK); return (result == DC_OK);
} }
bool dc_is_hw_initialized(struct dc *dc)
{
struct dc_bios *dcb = dc->ctx->dc_bios;
return dcb->funcs->is_accelerated_mode(dcb);
}
bool dc_post_update_surfaces_to_stream(struct dc *dc) bool dc_post_update_surfaces_to_stream(struct dc *dc)
{ {
int i; int i;
struct dc_state *context = dc->current_state; struct dc_state *context = dc->current_state;
if (!dc->optimized_required || dc->optimize_seamless_boot_streams > 0) if ((!dc->clk_optimized_required && !dc->wm_optimized_required) || dc->optimize_seamless_boot_streams > 0)
return true; return true;
post_surface_trace(dc); post_surface_trace(dc);
@@ -1341,8 +1377,6 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]); dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
} }
dc->optimized_required = false;
dc->hwss.optimize_bandwidth(dc, context); dc->hwss.optimize_bandwidth(dc, context);
return true; return true;
} }
@@ -1734,14 +1768,15 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->wb_update) if (stream_update->wb_update)
su_flags->bits.wb_update = 1; su_flags->bits.wb_update = 1;
if (stream_update->dsc_config)
su_flags->bits.dsc_changed = 1;
if (su_flags->raw != 0) if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL; overall_type = UPDATE_TYPE_FULL;
if (stream_update->output_csc_transform || stream_update->output_color_space) if (stream_update->output_csc_transform || stream_update->output_color_space)
su_flags->bits.out_csc = 1; su_flags->bits.out_csc = 1;
if (stream_update->dsc_config)
overall_type = UPDATE_TYPE_FULL;
} }
for (i = 0 ; i < surface_count; i++) { for (i = 0 ; i < surface_count; i++) {
@@ -1776,8 +1811,11 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
if (type == UPDATE_TYPE_FULL) { if (type == UPDATE_TYPE_FULL) {
if (stream_update) if (stream_update) {
uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
stream_update->stream->update_flags.raw = 0xFFFFFFFF; stream_update->stream->update_flags.raw = 0xFFFFFFFF;
stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
}
for (i = 0; i < surface_count; i++) for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0xFFFFFFFF; updates[i].surface->update_flags.raw = 0xFFFFFFFF;
} }
@@ -1786,10 +1824,10 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
// If there's an available clock comparator, we use that. // If there's an available clock comparator, we use that.
if (dc->clk_mgr->funcs->are_clock_states_equal) { if (dc->clk_mgr->funcs->are_clock_states_equal) {
if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
dc->optimized_required = true; dc->clk_optimized_required = true;
// Else we fallback to mem compare. // Else we fallback to mem compare.
} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
dc->optimized_required = true; dc->clk_optimized_required = true;
} }
} }
@@ -2093,18 +2131,14 @@ static void commit_planes_do_stream_update(struct dc *dc,
} }
} }
if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
dp_update_dsc_config(pipe_ctx);
dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
}
/* Full fe update*/ /* Full fe update*/
if (update_type == UPDATE_TYPE_FAST) if (update_type == UPDATE_TYPE_FAST)
continue; continue;
if (stream_update->dpms_off) { if (stream_update->dsc_config)
dc->hwss.pipe_control_lock(dc, pipe_ctx, true); dp_update_dsc_config(pipe_ctx);
if (stream_update->dpms_off) {
if (*stream_update->dpms_off) { if (*stream_update->dpms_off) {
core_link_disable_stream(pipe_ctx); core_link_disable_stream(pipe_ctx);
/* for dpms, keep acquired resources*/ /* for dpms, keep acquired resources*/
@@ -2118,8 +2152,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
core_link_enable_stream(dc->current_state, pipe_ctx); core_link_enable_stream(dc->current_state, pipe_ctx);
} }
dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
} }
if (stream_update->abm_level && pipe_ctx->stream_res.abm) { if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
@@ -2166,7 +2198,7 @@ static void commit_planes_for_stream(struct dc *dc,
dc->optimize_seamless_boot_streams--; dc->optimize_seamless_boot_streams--;
if (dc->optimize_seamless_boot_streams == 0) if (dc->optimize_seamless_boot_streams == 0)
dc->optimized_required = true; dc->clk_optimized_required = true;
} }
} }
@@ -2175,6 +2207,32 @@ static void commit_planes_for_stream(struct dc *dc,
context_clock_trace(dc, context); context_clock_trace(dc, context);
} }
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
if (!pipe_ctx->top_pipe &&
!pipe_ctx->prev_odm_pipe &&
pipe_ctx->stream &&
pipe_ctx->stream == stream) {
top_pipe_to_program = pipe_ctx;
}
}
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable)
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
top_pipe_to_program->stream_res.tg);
if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
dc->hwss.interdependent_update_lock(dc, context, true);
else
/* Lock the top pipe while updating plane addrs, since freesync requires
* plane addr update event triggers to be synchronized.
* top_pipe_to_program is expected to never be NULL
*/
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
// Stream updates // Stream updates
if (stream_update) if (stream_update)
commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
@@ -2189,6 +2247,12 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->hwss.program_front_end_for_ctx) if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context); dc->hwss.program_front_end_for_ctx(dc, context);
if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
dc->hwss.interdependent_update_lock(dc, context, false);
else
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
dc->hwss.post_unlock_program_front_end(dc, context);
return; return;
} }
@@ -2224,8 +2288,6 @@ static void commit_planes_for_stream(struct dc *dc,
pipe_ctx->stream == stream) { pipe_ctx->stream == stream) {
struct dc_stream_status *stream_status = NULL; struct dc_stream_status *stream_status = NULL;
top_pipe_to_program = pipe_ctx;
if (!pipe_ctx->plane_state) if (!pipe_ctx->plane_state)
continue; continue;
@@ -2270,12 +2332,6 @@ static void commit_planes_for_stream(struct dc *dc,
// Update Type FAST, Surface updates // Update Type FAST, Surface updates
if (update_type == UPDATE_TYPE_FAST) { if (update_type == UPDATE_TYPE_FAST) {
/* Lock the top pipe while updating plane addrs, since freesync requires
* plane addr update event triggers to be synchronized.
* top_pipe_to_program is expected to never be NULL
*/
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
if (dc->hwss.set_flip_control_gsl) if (dc->hwss.set_flip_control_gsl)
for (i = 0; i < surface_count; i++) { for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface; struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -2317,10 +2373,31 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.update_plane_addr(dc, pipe_ctx); dc->hwss.update_plane_addr(dc, pipe_ctx);
} }
} }
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
} }
if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
dc->hwss.interdependent_update_lock(dc, context, false);
else
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
top_pipe_to_program->stream_res.tg,
CRTC_STATE_VACTIVE);
top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
top_pipe_to_program->stream_res.tg,
CRTC_STATE_VBLANK);
top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
top_pipe_to_program->stream_res.tg,
CRTC_STATE_VACTIVE);
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
top_pipe_to_program->stream_res.tg);
}
if (update_type != UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
// Fire manual trigger only when bottom plane is flipped // Fire manual trigger only when bottom plane is flipped
for (j = 0; j < dc->res_pool->pipe_count; j++) { for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

View File

@@ -45,7 +45,7 @@
#include "dpcd_defs.h" #include "dpcd_defs.h"
#include "dmcu.h" #include "dmcu.h"
#include "hw/clk_mgr.h" #include "hw/clk_mgr.h"
#include "../dce/dmub_psr.h" #include "dce/dmub_psr.h"
#define DC_LOGGER_INIT(logger) #define DC_LOGGER_INIT(logger)
@@ -599,6 +599,9 @@ static bool detect_dp(
if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
dpcd_set_source_specific_data(link);
if (!detect_dp_sink_caps(link)) if (!detect_dp_sink_caps(link))
return false; return false;
@@ -769,8 +772,16 @@ static bool dc_link_detect_helper(struct dc_link *link,
if ((link->connector_signal == SIGNAL_TYPE_LVDS || if ((link->connector_signal == SIGNAL_TYPE_LVDS ||
link->connector_signal == SIGNAL_TYPE_EDP) && link->connector_signal == SIGNAL_TYPE_EDP) &&
link->local_sink) link->local_sink) {
// need to re-write OUI and brightness in resume case
if (link->connector_signal == SIGNAL_TYPE_EDP) {
dpcd_set_source_specific_data(link);
dc_link_set_default_brightness_aux(link); //TODO: use cached
}
return true; return true;
}
if (false == dc_link_detect_sink(link, &new_connection_type)) { if (false == dc_link_detect_sink(link, &new_connection_type)) {
BREAK_TO_DEBUGGER(); BREAK_TO_DEBUGGER();
@@ -818,6 +829,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
} }
case SIGNAL_TYPE_EDP: { case SIGNAL_TYPE_EDP: {
read_current_link_settings_on_detect(link);
dpcd_set_source_specific_data(link);
detect_edp_sink_caps(link); detect_edp_sink_caps(link);
read_current_link_settings_on_detect(link); read_current_link_settings_on_detect(link);
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
@@ -962,6 +977,9 @@ static bool dc_link_detect_helper(struct dc_link *link,
if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK))) if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid); same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
link->ctx->dc->debug.hdmi20_disable = true;
if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
/* /*
@@ -1492,6 +1510,7 @@ static enum dc_status enable_link_dp(
bool fec_enable; bool fec_enable;
int i; int i;
bool apply_seamless_boot_optimization = false; bool apply_seamless_boot_optimization = false;
uint32_t bl_oled_enable_delay = 50; // in ms
// check for seamless boot // check for seamless boot
for (i = 0; i < state->stream_count; i++) { for (i = 0; i < state->stream_count; i++) {
@@ -1515,6 +1534,9 @@ static enum dc_status enable_link_dp(
if (state->clk_mgr && !apply_seamless_boot_optimization) if (state->clk_mgr && !apply_seamless_boot_optimization)
state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false); state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
// during mode switch we do DP_SET_POWER off then on, and OUI is lost
dpcd_set_source_specific_data(link);
skip_video_pattern = true; skip_video_pattern = true;
if (link_settings.link_rate == LINK_RATE_LOW) if (link_settings.link_rate == LINK_RATE_LOW)
@@ -1538,6 +1560,17 @@ static enum dc_status enable_link_dp(
fec_enable = true; fec_enable = true;
dp_set_fec_enable(link, fec_enable); dp_set_fec_enable(link, fec_enable);
// during mode set we do DP_SET_POWER off then on, aux writes are lost
if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
dc_link_set_default_brightness_aux(link); // TODO: use cached if known
if (link->dpcd_sink_ext_caps.bits.oled == 1)
msleep(bl_oled_enable_delay);
dc_link_backlight_enable_aux(link, true);
}
return status; return status;
} }
@@ -2400,8 +2433,8 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
struct dmcu *dmcu = dc->res_pool->dmcu; struct dmcu *dmcu = dc->res_pool->dmcu;
struct dmub_psr *psr = dc->res_pool->psr; struct dmub_psr *psr = dc->res_pool->psr;
if ((psr != NULL) && link->psr_feature_enabled) if (psr != NULL && link->psr_feature_enabled)
psr->funcs->set_psr_enable(psr, allow_active); psr->funcs->psr_enable(psr, allow_active);
else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled) else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
dmcu->funcs->set_psr_enable(dmcu, allow_active, wait); dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
@@ -2417,7 +2450,7 @@ bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
struct dmub_psr *psr = dc->res_pool->psr; struct dmub_psr *psr = dc->res_pool->psr;
if (psr != NULL && link->psr_feature_enabled) if (psr != NULL && link->psr_feature_enabled)
psr->funcs->get_psr_state(psr_state); psr->funcs->psr_get_state(psr, psr_state);
else if (dmcu != NULL && link->psr_feature_enabled) else if (dmcu != NULL && link->psr_feature_enabled)
dmcu->funcs->get_psr_state(dmcu, psr_state); dmcu->funcs->get_psr_state(dmcu, psr_state);
@@ -2589,7 +2622,7 @@ bool dc_link_setup_psr(struct dc_link *link,
psr_context->frame_delay = 0; psr_context->frame_delay = 0;
if (psr) if (psr)
link->psr_feature_enabled = psr->funcs->setup_psr(psr, link, psr_context); link->psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context);
else else
link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context); link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
@@ -2922,7 +2955,8 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
memset(&config, 0, sizeof(config)); memset(&config, 0, sizeof(config));
config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst; config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->id; /*stream_enc_inst*/
config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst; config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst;
config.dpms_off = dpms_off; config.dpms_off = dpms_off;
config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
@@ -3061,6 +3095,9 @@ void core_link_enable_stream(
dc->hwss.unblank_stream(pipe_ctx, dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings); &pipe_ctx->stream->link->cur_link_settings);
if (stream->sink_patches.delay_ignore_msa > 0)
msleep(stream->sink_patches.delay_ignore_msa);
if (dc_is_dp_signal(pipe_ctx->stream->signal)) if (dc_is_dp_signal(pipe_ctx->stream->signal))
enable_stream_features(pipe_ctx); enable_stream_features(pipe_ctx);
#if defined(CONFIG_DRM_AMD_DC_HDCP) #if defined(CONFIG_DRM_AMD_DC_HDCP)

View File

@@ -126,22 +126,16 @@ struct aux_payloads {
struct vector payloads; struct vector payloads;
}; };
static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count) static bool dal_ddc_i2c_payloads_create(
struct dc_context *ctx,
struct i2c_payloads *payloads,
uint32_t count)
{ {
struct i2c_payloads *payloads;
payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL);
if (!payloads)
return NULL;
if (dal_vector_construct( if (dal_vector_construct(
&payloads->payloads, ctx, count, sizeof(struct i2c_payload))) &payloads->payloads, ctx, count, sizeof(struct i2c_payload)))
return payloads; return true;
kfree(payloads);
return NULL;
return false;
} }
static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p) static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
@@ -154,14 +148,12 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
return p->payloads.count; return p->payloads.count;
} }
static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p) static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p)
{ {
if (!p || !*p) if (!p)
return; return;
dal_vector_destruct(&(*p)->payloads);
kfree(*p);
*p = NULL;
dal_vector_destruct(&p->payloads);
} }
#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b)) #define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
@@ -524,9 +516,13 @@ bool dal_ddc_service_query_ddc_data(
uint32_t payloads_num = write_payloads + read_payloads; uint32_t payloads_num = write_payloads + read_payloads;
if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE) if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
return false; return false;
if (!payloads_num)
return false;
/*TODO: len of payload data for i2c and aux is uint8!!!!, /*TODO: len of payload data for i2c and aux is uint8!!!!,
* but we want to read 256 over i2c!!!!*/ * but we want to read 256 over i2c!!!!*/
if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) { if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
@@ -557,23 +553,25 @@ bool dal_ddc_service_query_ddc_data(
ret = dal_ddc_submit_aux_command(ddc, &payload); ret = dal_ddc_submit_aux_command(ddc, &payload);
} }
} else { } else {
struct i2c_payloads *payloads = struct i2c_command command = {0};
dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num); struct i2c_payloads payloads;
struct i2c_command command = { if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
.payloads = dal_ddc_i2c_payloads_get(payloads), return false;
.number_of_payloads = 0,
.engine = DDC_I2C_COMMAND_ENGINE, command.payloads = dal_ddc_i2c_payloads_get(&payloads);
.speed = ddc->ctx->dc->caps.i2c_speed_in_khz }; command.number_of_payloads = 0;
command.engine = DDC_I2C_COMMAND_ENGINE;
command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
dal_ddc_i2c_payloads_add( dal_ddc_i2c_payloads_add(
payloads, address, write_size, write_buf, true); &payloads, address, write_size, write_buf, true);
dal_ddc_i2c_payloads_add( dal_ddc_i2c_payloads_add(
payloads, address, read_size, read_buf, false); &payloads, address, read_size, read_buf, false);
command.number_of_payloads = command.number_of_payloads =
dal_ddc_i2c_payloads_get_count(payloads); dal_ddc_i2c_payloads_get_count(&payloads);
ret = dm_helpers_submit_i2c( ret = dm_helpers_submit_i2c(
ddc->ctx, ddc->ctx,
@@ -686,6 +684,10 @@ void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
uint8_t write_buffer[2] = {0}; uint8_t write_buffer[2] = {0};
/*Lower than 340 Scramble bit from SCDC caps*/ /*Lower than 340 Scramble bit from SCDC caps*/
if (ddc_service->link->local_sink &&
ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
return;
dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset, dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), &sink_version, sizeof(sink_version)); sizeof(offset), &sink_version, sizeof(sink_version));
if (sink_version == 1) { if (sink_version == 1) {
@@ -715,6 +717,10 @@ void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
uint8_t offset = HDMI_SCDC_TMDS_CONFIG; uint8_t offset = HDMI_SCDC_TMDS_CONFIG;
uint8_t tmds_config = 0; uint8_t tmds_config = 0;
if (ddc_service->link->local_sink &&
ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
return;
dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset, dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), &tmds_config, sizeof(tmds_config)); sizeof(offset), &tmds_config, sizeof(tmds_config));
if (tmds_config & 0x1) { if (tmds_config & 0x1) {

View File

@@ -945,6 +945,17 @@ static enum link_training_result perform_channel_equalization_sequence(
} }
#define TRAINING_AUX_RD_INTERVAL 100 //us #define TRAINING_AUX_RD_INTERVAL 100 //us
static void start_clock_recovery_pattern_early(struct dc_link *link,
struct link_training_settings *lt_settings,
uint32_t offset)
{
DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n",
__func__);
dp_set_hw_training_pattern(link, DP_TRAINING_PATTERN_SEQUENCE_1, offset);
dp_set_hw_lane_settings(link, lt_settings, offset);
udelay(400);
}
static enum link_training_result perform_clock_recovery_sequence( static enum link_training_result perform_clock_recovery_sequence(
struct dc_link *link, struct dc_link *link,
struct link_training_settings *lt_settings, struct link_training_settings *lt_settings,
@@ -962,6 +973,7 @@ static enum link_training_result perform_clock_recovery_sequence(
retries_cr = 0; retries_cr = 0;
retry_count = 0; retry_count = 0;
if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
dp_set_hw_training_pattern(link, tr_pattern, offset); dp_set_hw_training_pattern(link, tr_pattern, offset);
/* najeeb - The synaptics MST hub can put the LT in /* najeeb - The synaptics MST hub can put the LT in
@@ -1435,6 +1447,9 @@ enum link_training_result dc_link_dp_perform_link_training(
&lt_settings); &lt_settings);
/* 1. set link rate, lane count and spread. */ /* 1. set link rate, lane count and spread. */
if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
else
dpcd_set_link_settings(link, &lt_settings); dpcd_set_link_settings(link, &lt_settings);
if (link->preferred_training_settings.fec_enable != NULL) if (link->preferred_training_settings.fec_enable != NULL)
@@ -1654,8 +1669,10 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
dp_set_panel_mode(link, panel_mode); dp_set_panel_mode(link, panel_mode);
/* Attempt to train with given link training settings */ /* Attempt to train with given link training settings */
/* Set link rate, lane count and spread. */ /* Set link rate, lane count and spread. */
if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
else
dpcd_set_link_settings(link, &lt_settings); dpcd_set_link_settings(link, &lt_settings);
/* 2. perform link training (set link training done /* 2. perform link training (set link training done
@@ -1892,6 +1909,16 @@ bool dp_verify_link_cap(
/* disable PHY done possible by BIOS, will be done by driver itself */ /* disable PHY done possible by BIOS, will be done by driver itself */
dp_disable_link_phy(link, link->connector_signal); dp_disable_link_phy(link, link->connector_signal);
dp_cs_id = get_clock_source_id(link);
/* link training starts with the maximum common settings
* supported by both sink and ASIC.
*/
initial_link_settings = get_common_supported_link_settings(
*known_limit_link_setting,
max_link_cap);
cur_link_setting = initial_link_settings;
/* Temporary Renoir-specific workaround for SWDEV-215184; /* Temporary Renoir-specific workaround for SWDEV-215184;
* PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle, * PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle,
* so add extra cycle of enabling and disabling the PHY before first link training. * so add extra cycle of enabling and disabling the PHY before first link training.
@@ -1902,15 +1929,6 @@ bool dp_verify_link_cap(
dp_disable_link_phy(link, link->connector_signal); dp_disable_link_phy(link, link->connector_signal);
} }
dp_cs_id = get_clock_source_id(link);
/* link training starts with the maximum common settings
* supported by both sink and ASIC.
*/
initial_link_settings = get_common_supported_link_settings(
*known_limit_link_setting,
max_link_cap);
cur_link_setting = initial_link_settings;
do { do {
skip_video_pattern = true; skip_video_pattern = true;
@@ -3165,6 +3183,23 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
link->wa_flags.dp_keep_receiver_powered = false; link->wa_flags.dp_keep_receiver_powered = false;
} }
/* Read additional sink caps defined in source specific DPCD area
* This function currently only reads from SinkCapability address (DP_SOURCE_SINK_CAP)
*/
static bool dpcd_read_sink_ext_caps(struct dc_link *link)
{
uint8_t dpcd_data;
if (!link)
return false;
if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK)
return false;
link->dpcd_sink_ext_caps.raw = dpcd_data;
return true;
}
static bool retrieve_link_cap(struct dc_link *link) static bool retrieve_link_cap(struct dc_link *link)
{ {
/* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
@@ -3437,6 +3472,9 @@ static bool retrieve_link_cap(struct dc_link *link)
sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw)); sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw));
} }
if (!dpcd_read_sink_ext_caps(link))
link->dpcd_sink_ext_caps.raw = 0;
/* Connectivity log: detection */ /* Connectivity log: detection */
CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
@@ -3589,6 +3627,8 @@ void detect_edp_sink_caps(struct dc_link *link)
} }
} }
link->verified_link_cap = link->reported_link_cap; link->verified_link_cap = link->reported_link_cap;
dc_link_set_default_brightness_aux(link);
} }
void dc_link_dp_enable_hpd(const struct dc_link *link) void dc_link_dp_enable_hpd(const struct dc_link *link)
@@ -3680,7 +3720,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
struct pipe_ctx *odm_pipe; struct pipe_ctx *odm_pipe;
enum controller_dp_color_space controller_color_space; enum controller_dp_color_space controller_color_space;
int opp_cnt = 1; int opp_cnt = 1;
int count; uint16_t count = 0;
switch (test_pattern_color_space) { switch (test_pattern_color_space) {
case DP_TEST_PATTERN_COLOR_SPACE_RGB: case DP_TEST_PATTERN_COLOR_SPACE_RGB:
@@ -4146,3 +4186,148 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
} }
} }
void dpcd_set_source_specific_data(struct dc_link *link)
{
const uint32_t post_oui_delay = 30; // 30ms
if (!link->dc->vendor_signature.is_valid) {
struct dpcd_amd_signature amd_signature;
amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0;
amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A;
amd_signature.device_id_byte1 =
(uint8_t)(link->ctx->asic_id.chip_id);
amd_signature.device_id_byte2 =
(uint8_t)(link->ctx->asic_id.chip_id >> 8);
memset(&amd_signature.zero, 0, 4);
amd_signature.dce_version =
(uint8_t)(link->ctx->dce_version);
amd_signature.dal_version_byte1 = 0x0; // needed? where to get?
amd_signature.dal_version_byte2 = 0x0; // needed? where to get?
core_link_write_dpcd(link, DP_SOURCE_OUI,
(uint8_t *)(&amd_signature),
sizeof(amd_signature));
} else {
core_link_write_dpcd(link, DP_SOURCE_OUI,
link->dc->vendor_signature.data.raw,
sizeof(link->dc->vendor_signature.data.raw));
}
// Sink may need to configure internals based on vendor, so allow some
// time before proceeding with possibly vendor specific transactions
msleep(post_oui_delay);
}
bool dc_link_set_backlight_level_nits(struct dc_link *link,
bool isHDR,
uint32_t backlight_millinits,
uint32_t transition_time_in_ms)
{
struct dpcd_source_backlight_set dpcd_backlight_set;
uint8_t backlight_control = isHDR ? 1 : 0;
if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
// OLEDs have no PWM, they can only use AUX
if (link->dpcd_sink_ext_caps.bits.oled == 1)
backlight_control = 1;
*(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
*(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
(uint8_t *)(&dpcd_backlight_set),
sizeof(dpcd_backlight_set)) != DC_OK)
return false;
if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
&backlight_control, 1) != DC_OK)
return false;
return true;
}
bool dc_link_get_backlight_level_nits(struct dc_link *link,
uint32_t *backlight_millinits_avg,
uint32_t *backlight_millinits_peak)
{
union dpcd_source_backlight_get dpcd_backlight_get;
memset(&dpcd_backlight_get, 0, sizeof(union dpcd_source_backlight_get));
if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
dpcd_backlight_get.raw,
sizeof(union dpcd_source_backlight_get)))
return false;
*backlight_millinits_avg =
dpcd_backlight_get.bytes.backlight_millinits_avg;
*backlight_millinits_peak =
dpcd_backlight_get.bytes.backlight_millinits_peak;
/* On non-supported panels dpcd_read usually succeeds with 0 returned */
if (*backlight_millinits_avg == 0 ||
*backlight_millinits_avg > *backlight_millinits_peak)
return false;
return true;
}
bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable)
{
uint8_t backlight_enable = enable ? 1 : 0;
if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
&backlight_enable, 1) != DC_OK)
return false;
return true;
}
// we read default from 0x320 because we expect BIOS wrote it there
// regular get_backlight_nit reads from panel set at 0x326
bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits)
{
if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
(uint8_t *) backlight_millinits,
sizeof(uint32_t)))
return false;
return true;
}
bool dc_link_set_default_brightness_aux(struct dc_link *link)
{
uint32_t default_backlight;
if (link &&
(link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
if (!dc_link_read_default_bl_aux(link, &default_backlight))
default_backlight = 150000;
// if < 5 nits or > 5000, it might be wrong readback
if (default_backlight < 5000 || default_backlight > 5000000)
default_backlight = 150000; //
return dc_link_set_backlight_level_nits(link, true,
default_backlight, 0);
}
return false;
}

View File

@@ -153,9 +153,9 @@ bool edp_receiver_ready_T9(struct dc_link *link)
unsigned char edpRev = 0; unsigned char edpRev = 0;
enum dc_status result = DC_OK; enum dc_status result = DC_OK;
result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
if (edpRev < DP_EDP_12)
return true;
/* start from eDP version 1.2, SINK_STATUS indicates the sink is ready.*/ /* start from eDP version 1.2, SINK_STATUS indicates the sink is ready.*/
if (result == DC_OK && edpRev >= DP_EDP_12) {
do { do {
sinkstatus = 1; sinkstatus = 1;
result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
@@ -165,6 +165,7 @@ bool edp_receiver_ready_T9(struct dc_link *link)
break; break;
udelay(100); //MAx T9 udelay(100); //MAx T9
} while (++tries < 50); } while (++tries < 50);
}
if (link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0) if (link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0)
udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000); udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000);
@@ -183,8 +184,8 @@ bool edp_receiver_ready_T7(struct dc_link *link)
unsigned long long time_taken_in_ns = 0; unsigned long long time_taken_in_ns = 0;
result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
if (result == DC_OK && edpRev < DP_EDP_12)
return true; if (result == DC_OK && edpRev >= DP_EDP_12) {
/* start from eDP version 1.2, SINK_STATUS indicates the sink is ready.*/ /* start from eDP version 1.2, SINK_STATUS indicates the sink is ready.*/
enter_timestamp = dm_get_timestamp(link->ctx); enter_timestamp = dm_get_timestamp(link->ctx);
do { do {
@@ -198,6 +199,7 @@ bool edp_receiver_ready_T7(struct dc_link *link)
finish_timestamp = dm_get_timestamp(link->ctx); finish_timestamp = dm_get_timestamp(link->ctx);
time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp); time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
} while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms } while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms
}
if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0) if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000); udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);

View File

@@ -46,12 +46,12 @@
#include "dce100/dce100_resource.h" #include "dce100/dce100_resource.h"
#include "dce110/dce110_resource.h" #include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h" #include "dce112/dce112_resource.h"
#include "dce120/dce120_resource.h"
#if defined(CONFIG_DRM_AMD_DC_DCN) #if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/dcn10_resource.h" #include "dcn10/dcn10_resource.h"
#endif
#include "dcn20/dcn20_resource.h" #include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h" #include "dcn21/dcn21_resource.h"
#include "dce120/dce120_resource.h" #endif
#define DC_LOGGER_INIT(logger) #define DC_LOGGER_INIT(logger)
@@ -532,6 +532,51 @@ static inline void get_vp_scan_direction(
*flip_horz_scan_dir = !*flip_horz_scan_dir; *flip_horz_scan_dir = !*flip_horz_scan_dir;
} }
int get_num_odm_splits(struct pipe_ctx *pipe)
{
int odm_split_count = 0;
struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
while (next_pipe) {
odm_split_count++;
next_pipe = next_pipe->next_odm_pipe;
}
pipe = pipe->prev_odm_pipe;
while (pipe) {
odm_split_count++;
pipe = pipe->prev_odm_pipe;
}
return odm_split_count;
}
static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *split_count, int *split_idx)
{
*split_count = get_num_odm_splits(pipe_ctx);
*split_idx = 0;
if (*split_count == 0) {
/*Check for mpc split*/
struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
(*split_idx)++;
(*split_count)++;
split_pipe = split_pipe->top_pipe;
}
split_pipe = pipe_ctx->bottom_pipe;
while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
(*split_count)++;
split_pipe = split_pipe->bottom_pipe;
}
} else {
/*Get odm split index*/
struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe;
while (split_pipe) {
(*split_idx)++;
split_pipe = split_pipe->prev_odm_pipe;
}
}
}
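For orientation (not part of the commit): in a 2:1 ODM configuration the pipes are linked through next_odm_pipe/prev_odm_pipe, so get_num_odm_splits() returns 1 for either pipe, and calculate_split_count_and_index() reports split_count = 1 with split_idx = 0 for the leading pipe and 1 for its neighbour. Only when no ODM split exists does the helper fall back to walking top_pipe/bottom_pipe to detect an MPC split of the same plane. A minimal caller-side sketch, with program_slice() as a hypothetical helper:
    int split_count = 0, split_idx = 0;
    calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
    if (split_count == 0) {
            /* single pipe: no ODM or MPC split of this plane */
    } else {
            /* this pipe drives slice split_idx out of split_count + 1 slices */
            program_slice(pipe_ctx, split_idx, split_count + 1); /* hypothetical helper */
    }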
static void calculate_viewport(struct pipe_ctx *pipe_ctx) static void calculate_viewport(struct pipe_ctx *pipe_ctx)
{ {
const struct dc_plane_state *plane_state = pipe_ctx->plane_state; const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
@@ -541,16 +586,16 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
struct rect clip, dest; struct rect clip, dest;
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8 int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1; || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
bool pri_split = pipe_ctx->bottom_pipe && int split_count = 0;
pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state; int split_idx = 0;
bool sec_split = pipe_ctx->top_pipe &&
pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
bool orthogonal_rotation, flip_y_start, flip_x_start; bool orthogonal_rotation, flip_y_start, flip_x_start;
calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE || if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) { stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
pri_split = false; split_count = 0;
sec_split = false; split_idx = 0;
} }
/* The actual clip is an intersection between stream /* The actual clip is an intersection between stream
@@ -609,23 +654,32 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
data->viewport.height = clip.height * surf_src.height / dest.height; data->viewport.height = clip.height * surf_src.height / dest.height;
/* Handle split */ /* Handle split */
if (pri_split || sec_split) { if (split_count) {
/* extra pixels in the division remainder need to go to pipes after
* the extra pixel index minus one (epimo) defined here as:
*/
int epimo = 0;
if (orthogonal_rotation) { if (orthogonal_rotation) {
if (flip_y_start != pri_split) if (flip_y_start)
data->viewport.height /= 2; split_idx = split_count - split_idx;
else {
data->viewport.y += data->viewport.height / 2; epimo = split_count - data->viewport.height % (split_count + 1);
/* Ceil offset pipe */
data->viewport.height = (data->viewport.height + 1) / 2; data->viewport.y += (data->viewport.height / (split_count + 1)) * split_idx;
} if (split_idx > epimo)
data->viewport.y += split_idx - epimo - 1;
data->viewport.height = data->viewport.height / (split_count + 1) + (split_idx > epimo ? 1 : 0);
} else { } else {
if (flip_x_start != pri_split) if (flip_x_start)
data->viewport.width /= 2; split_idx = split_count - split_idx;
else {
data->viewport.x += data->viewport.width / 2; epimo = split_count - data->viewport.width % (split_count + 1);
/* Ceil offset pipe */
data->viewport.width = (data->viewport.width + 1) / 2; data->viewport.x += (data->viewport.width / (split_count + 1)) * split_idx;
} if (split_idx > epimo)
data->viewport.x += split_idx - epimo - 1;
data->viewport.width = data->viewport.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
} }
} }
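A worked example of the remainder distribution above (illustrative numbers only): with data->viewport.width = 1930 and split_count = 2 (three pipes sharing the plane),
    epimo = 2 - 1930 % 3 = 2 - 1 = 1
    pipe with split_idx 0: viewport.x += 0,    width = 1930 / 3     = 643
    pipe with split_idx 1: viewport.x += 643,  width = 1930 / 3     = 643
    pipe with split_idx 2: viewport.x += 1286, width = 1930 / 3 + 1 = 644   (split_idx > epimo)
so the three slices cover 643 + 643 + 644 = 1930 pixels with no gap or overlap; the same arithmetic applies to the height when the rotation is orthogonal.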
@@ -644,58 +698,58 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
{ {
const struct dc_plane_state *plane_state = pipe_ctx->plane_state; const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream; const struct dc_stream_state *stream = pipe_ctx->stream;
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect surf_clip = plane_state->clip_rect; struct rect surf_clip = plane_state->clip_rect;
bool pri_split = pipe_ctx->bottom_pipe && bool pri_split_tb = pipe_ctx->bottom_pipe &&
pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state; pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state &&
bool sec_split = pipe_ctx->top_pipe && stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; bool sec_split_tb = pipe_ctx->top_pipe &&
bool top_bottom_split = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM; pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state &&
stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
int split_count = 0;
int split_idx = 0;
pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x; calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
data->recout.x = stream->dst.x;
if (stream->src.x < surf_clip.x) if (stream->src.x < surf_clip.x)
pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
- stream->src.x) * stream->dst.width
/ stream->src.width; / stream->src.width;
pipe_ctx->plane_res.scl_data.recout.width = surf_clip.width * data->recout.width = surf_clip.width * stream->dst.width / stream->src.width;
stream->dst.width / stream->src.width; if (data->recout.width + data->recout.x > stream->dst.x + stream->dst.width)
if (pipe_ctx->plane_res.scl_data.recout.width + pipe_ctx->plane_res.scl_data.recout.x > data->recout.width = stream->dst.x + stream->dst.width - data->recout.x;
stream->dst.x + stream->dst.width)
pipe_ctx->plane_res.scl_data.recout.width =
stream->dst.x + stream->dst.width
- pipe_ctx->plane_res.scl_data.recout.x;
pipe_ctx->plane_res.scl_data.recout.y = stream->dst.y; data->recout.y = stream->dst.y;
if (stream->src.y < surf_clip.y) if (stream->src.y < surf_clip.y)
pipe_ctx->plane_res.scl_data.recout.y += (surf_clip.y data->recout.y += (surf_clip.y - stream->src.y) * stream->dst.height
- stream->src.y) * stream->dst.height
/ stream->src.height; / stream->src.height;
pipe_ctx->plane_res.scl_data.recout.height = surf_clip.height * data->recout.height = surf_clip.height * stream->dst.height / stream->src.height;
stream->dst.height / stream->src.height; if (data->recout.height + data->recout.y > stream->dst.y + stream->dst.height)
if (pipe_ctx->plane_res.scl_data.recout.height + pipe_ctx->plane_res.scl_data.recout.y > data->recout.height = stream->dst.y + stream->dst.height - data->recout.y;
stream->dst.y + stream->dst.height)
pipe_ctx->plane_res.scl_data.recout.height =
stream->dst.y + stream->dst.height
- pipe_ctx->plane_res.scl_data.recout.y;
/* Handle h & v split, handle rotation using viewport */ /* Handle h & v split, handle rotation using viewport */
if (sec_split && top_bottom_split) { if (sec_split_tb) {
pipe_ctx->plane_res.scl_data.recout.y += data->recout.y += data->recout.height / 2;
pipe_ctx->plane_res.scl_data.recout.height / 2;
/* Floor primary pipe, ceil 2ndary pipe */ /* Floor primary pipe, ceil 2ndary pipe */
pipe_ctx->plane_res.scl_data.recout.height = data->recout.height = (data->recout.height + 1) / 2;
(pipe_ctx->plane_res.scl_data.recout.height + 1) / 2; } else if (pri_split_tb)
} else if (pri_split && top_bottom_split) data->recout.height /= 2;
pipe_ctx->plane_res.scl_data.recout.height /= 2; else if (split_count) {
else if (sec_split) { /* extra pixels in the division remainder need to go to pipes after
pipe_ctx->plane_res.scl_data.recout.x += * the extra pixel index minus one (epimo) defined here as:
pipe_ctx->plane_res.scl_data.recout.width / 2; */
/* Ceil offset pipe */ int epimo = split_count - data->recout.width % (split_count + 1);
pipe_ctx->plane_res.scl_data.recout.width =
(pipe_ctx->plane_res.scl_data.recout.width + 1) / 2; /*no recout offset due to odm */
} else if (pri_split) if (!pipe_ctx->next_odm_pipe && !pipe_ctx->prev_odm_pipe) {
pipe_ctx->plane_res.scl_data.recout.width /= 2; data->recout.x += (data->recout.width / (split_count + 1)) * split_idx;
if (split_idx > epimo)
data->recout.x += split_idx - epimo - 1;
}
data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
}
} }
static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx) static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
@@ -832,6 +886,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
{ {
const struct dc_plane_state *plane_state = pipe_ctx->plane_state; const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream; const struct dc_stream_state *stream = pipe_ctx->stream;
struct pipe_ctx *odm_pipe = pipe_ctx->prev_odm_pipe;
struct scaler_data *data = &pipe_ctx->plane_res.scl_data; struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect src = pipe_ctx->plane_state->src_rect; struct rect src = pipe_ctx->plane_state->src_rect;
int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v; int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v;
@@ -869,6 +924,12 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
* stream->dst.width / stream->src.width - * stream->dst.width / stream->src.width -
src.x * plane_state->dst_rect.width / src.width src.x * plane_state->dst_rect.width / src.width
* stream->dst.width / stream->src.width); * stream->dst.width / stream->src.width);
/*modified recout_skip_h calculation due to odm having no recout offset caused by split*/
while (odm_pipe) {
recout_skip_h += odm_pipe->plane_res.scl_data.recout.width + odm_pipe->plane_res.scl_data.recout.x;
odm_pipe = odm_pipe->prev_odm_pipe;
}
recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y) recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
* stream->dst.height / stream->src.height - * stream->dst.height / stream->src.height -
src.y * plane_state->dst_rect.height / src.height src.y * plane_state->dst_rect.height / src.height
@@ -1021,6 +1082,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
store_h_border_left + timing->h_border_right; store_h_border_left + timing->h_border_right;
pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
timing->v_border_top + timing->v_border_bottom; timing->v_border_top + timing->v_border_bottom;
if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe)
pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1;
/* Taps calculations */ /* Taps calculations */
if (pipe_ctx->plane_res.xfm != NULL) if (pipe_ctx->plane_res.xfm != NULL)
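As a concrete illustration of the new h_active adjustment (numbers are assumptions, not from the commit): for a 3840-pixel-wide timing driven by a 2:1 ODM pair, get_num_odm_splits() returns 1, so each pipe's scaler data is programmed with
    pipe_ctx->plane_res.scl_data.h_active = 3840 / (1 + 1);   /* 1920 pixels per ODM slice */
while v_active keeps the full timing height, since the diff divides only the horizontal dimension for ODM.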
@@ -2034,7 +2097,7 @@ enum dc_status resource_map_pool_resources(
for (i = 0; i < context->stream_count; i++) for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) { if (context->streams[i] == stream) {
context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst; context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->id; context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->stream_enc_inst;
context->stream_status[i].audio_inst = context->stream_status[i].audio_inst =
pipe_ctx->stream_res.audio ? pipe_ctx->stream_res.audio->inst : -1; pipe_ctx->stream_res.audio ? pipe_ctx->stream_res.audio->inst : -1;

View File

@@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
*/ */
memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config)); memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
dc->vm_pa_config.valid = true; dc->vm_pa_config.valid = true;
if (pa_config->is_hvm_enabled == 0)
dc->debug.nv12_iflip_vm_wa = false;
} }
return num_vmids; return num_vmids;
@@ -62,7 +65,7 @@ int dc_get_vmid_use_vector(struct dc *dc)
int i; int i;
int in_use = 0; int in_use = 0;
for (i = 0; i < dc->vm_helper->num_vmid; i++) for (i = 0; i < MAX_HUBP; i++)
in_use |= dc->vm_helper->hubp_vmid_usage[i].vmid_usage[0] in_use |= dc->vm_helper->hubp_vmid_usage[i].vmid_usage[0]
| dc->vm_helper->hubp_vmid_usage[i].vmid_usage[1]; | dc->vm_helper->hubp_vmid_usage[i].vmid_usage[1];
return in_use; return in_use;

View File

@@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h" #include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h" #include "dml/display_mode_lib.h"
#define DC_VER "3.2.69" #define DC_VER "3.2.74"
#define MAX_SURFACES 3 #define MAX_SURFACES 3
#define MAX_PLANES 6 #define MAX_PLANES 6
@@ -126,6 +126,7 @@ struct dc_bug_wa {
bool no_connect_phy_config; bool no_connect_phy_config;
bool dedcn20_305_wa; bool dedcn20_305_wa;
bool skip_clock_update; bool skip_clock_update;
bool lt_early_cr_pattern;
}; };
struct dc_dcc_surface_param { struct dc_dcc_surface_param {
@@ -409,6 +410,7 @@ struct dc_debug_options {
bool dmub_offload_enabled; bool dmub_offload_enabled;
bool dmcub_emulation; bool dmcub_emulation;
bool dmub_command_table; /* for testing only */ bool dmub_command_table; /* for testing only */
bool psr_on_dmub;
struct dc_bw_validation_profile bw_val_profile; struct dc_bw_validation_profile bw_val_profile;
bool disable_fec; bool disable_fec;
bool disable_48mhz_pwrdwn; bool disable_48mhz_pwrdwn;
@@ -453,6 +455,7 @@ struct dc_phy_addr_space_config {
} gart_config; } gart_config;
bool valid; bool valid;
bool is_hvm_enabled;
uint64_t page_table_default_page_addr; uint64_t page_table_default_page_addr;
}; };
@@ -517,7 +520,8 @@ struct dc {
struct dce_hwseq *hwseq; struct dce_hwseq *hwseq;
/* Require to optimize clocks and bandwidth for added/removed planes */ /* Require to optimize clocks and bandwidth for added/removed planes */
bool optimized_required; bool clk_optimized_required;
bool wm_optimized_required;
/* Require to maintain clocks and bandwidth for UEFI enabled HW */ /* Require to maintain clocks and bandwidth for UEFI enabled HW */
int optimize_seamless_boot_streams; int optimize_seamless_boot_streams;
@@ -526,6 +530,7 @@ struct dc {
struct compressor *fbc_compressor; struct compressor *fbc_compressor;
struct dc_debug_data debug_data; struct dc_debug_data debug_data;
struct dpcd_vendor_signature vendor_signature;
const char *build_id; const char *build_id;
struct vm_helper *vm_helper; struct vm_helper *vm_helper;
@@ -565,12 +570,14 @@ struct dc_init_data {
struct dc_reg_helper_state *dmub_offload; struct dc_reg_helper_state *dmub_offload;
struct dc_config flags; struct dc_config flags;
uint32_t log_mask; uint64_t log_mask;
/** /**
* gpu_info FW provided soc bounding box struct or 0 if not * gpu_info FW provided soc bounding box struct or 0 if not
* available in FW * available in FW
*/ */
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
struct dpcd_vendor_signature vendor_signature;
}; };
struct dc_callback_init { struct dc_callback_init {
@@ -682,7 +689,6 @@ struct dc_3dlut {
struct kref refcount; struct kref refcount;
struct tetrahedral_params lut_3d; struct tetrahedral_params lut_3d;
struct fixed31_32 hdr_multiplier; struct fixed31_32 hdr_multiplier;
bool initialized; /*remove after diag fix*/
union dc_3dlut_state state; union dc_3dlut_state state;
struct dc_context *ctx; struct dc_context *ctx;
}; };
@@ -979,6 +985,20 @@ struct dpcd_caps {
}; };
union dpcd_sink_ext_caps {
struct {
/* 0 - Sink supports backlight adjust via PWM during SDR/HDR mode
* 1 - Sink supports backlight adjust via AUX during SDR/HDR mode.
*/
uint8_t sdr_aux_backlight_control : 1;
uint8_t hdr_aux_backlight_control : 1;
uint8_t reserved_1 : 2;
uint8_t oled : 1;
uint8_t reserved : 3;
} bits;
uint8_t raw;
};
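A minimal usage sketch for the new capability union; the DPCD read helper and register offset below are placeholders, not identifiers from this commit:
    union dpcd_sink_ext_caps ext_caps = { .raw = 0 };
    /* hypothetical AUX read of the sink's extended-caps byte */
    if (read_dpcd_byte(link, SINK_EXT_CAPS_ADDR, &ext_caps.raw)) {
            if (ext_caps.bits.oled ||
                ext_caps.bits.sdr_aux_backlight_control ||
                ext_caps.bits.hdr_aux_backlight_control)
                    use_aux_backlight = true;   /* brightness goes over AUX, not PWM */
    }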
#include "dc_link.h" #include "dc_link.h"
/******************************************************************************* /*******************************************************************************
@@ -1075,7 +1095,6 @@ unsigned int dc_get_current_backlight_pwm(struct dc *dc);
unsigned int dc_get_target_backlight_pwm(struct dc *dc); unsigned int dc_get_target_backlight_pwm(struct dc *dc);
bool dc_is_dmcu_initialized(struct dc *dc); bool dc_is_dmcu_initialized(struct dc *dc);
bool dc_is_hw_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping); enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);

View File

@@ -432,6 +432,54 @@ struct dp_sink_hw_fw_revision {
uint8_t ieee_fw_rev[2]; uint8_t ieee_fw_rev[2];
}; };
struct dpcd_vendor_signature {
bool is_valid;
union dpcd_ieee_vendor_signature {
struct {
uint8_t ieee_oui[3];/*24-bit IEEE OUI*/
uint8_t ieee_device_id[6];/*usually 6-byte ASCII name*/
uint8_t ieee_hw_rev;
uint8_t ieee_fw_rev[2];
};
uint8_t raw[12];
} data;
};
struct dpcd_amd_signature {
uint8_t AMD_IEEE_TxSignature_byte1;
uint8_t AMD_IEEE_TxSignature_byte2;
uint8_t AMD_IEEE_TxSignature_byte3;
uint8_t device_id_byte1;
uint8_t device_id_byte2;
uint8_t zero[4];
uint8_t dce_version;
uint8_t dal_version_byte1;
uint8_t dal_version_byte2;
};
struct dpcd_source_backlight_set {
struct {
uint8_t byte0;
uint8_t byte1;
uint8_t byte2;
uint8_t byte3;
} backlight_level_millinits;
struct {
uint8_t byte0;
uint8_t byte1;
} backlight_transition_time_ms;
};
union dpcd_source_backlight_get {
struct {
uint32_t backlight_millinits_peak; /* 326h */
uint32_t backlight_millinits_avg; /* 32Ah */
} bytes;
uint8_t raw[8];
};
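Sketch of how a caller might fill the write-side struct, assuming the four bytes are the little-endian encoding of a millinits value (the packing order is an assumption; only the field names come from the struct above):
    struct dpcd_source_backlight_set set = { 0 };
    uint32_t millinits = 250 * 1000;   /* 250 nits expressed in millinits */
    set.backlight_level_millinits.byte0 = (millinits >>  0) & 0xFF;
    set.backlight_level_millinits.byte1 = (millinits >>  8) & 0xFF;
    set.backlight_level_millinits.byte2 = (millinits >> 16) & 0xFF;
    set.backlight_level_millinits.byte3 = (millinits >> 24) & 0xFF;
    set.backlight_transition_time_ms.byte0 = 0;   /* immediate transition */
    set.backlight_transition_time_ms.byte1 = 0;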
/*DPCD register of DP receiver capability field bits-*/ /*DPCD register of DP receiver capability field bits-*/
union edp_configuration_cap { union edp_configuration_cap {
struct { struct {

View File

@@ -26,6 +26,7 @@
#ifndef DC_LINK_H_ #ifndef DC_LINK_H_
#define DC_LINK_H_ #define DC_LINK_H_
#include "dc.h"
#include "dc_types.h" #include "dc_types.h"
#include "grph_object_defs.h" #include "grph_object_defs.h"
@@ -128,6 +129,7 @@ struct dc_link {
enum edp_revision edp_revision; enum edp_revision edp_revision;
bool psr_feature_enabled; bool psr_feature_enabled;
bool psr_allow_active; bool psr_allow_active;
union dpcd_sink_ext_caps dpcd_sink_ext_caps;
/* MST record stream using this link */ /* MST record stream using this link */
struct link_flags { struct link_flags {
@@ -178,6 +180,21 @@ bool dc_link_set_backlight_level(const struct dc_link *dc_link,
uint32_t backlight_pwm_u16_16, uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp); uint32_t frame_ramp);
/* Set/get nits-based backlight level of an embedded panel (eDP, LVDS). */
bool dc_link_set_backlight_level_nits(struct dc_link *link,
bool isHDR,
uint32_t backlight_millinits,
uint32_t transition_time_in_ms);
bool dc_link_get_backlight_level_nits(struct dc_link *link,
uint32_t *backlight_millinits,
uint32_t *backlight_millinits_peak);
bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable);
bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits);
bool dc_link_set_default_brightness_aux(struct dc_link *link);
int dc_link_get_backlight_level(const struct dc_link *dc_link); int dc_link_get_backlight_level(const struct dc_link *dc_link);
bool dc_link_set_abm_disable(const struct dc_link *dc_link); bool dc_link_set_abm_disable(const struct dc_link *dc_link);

View File

@@ -118,6 +118,7 @@ union stream_update_flags {
uint32_t dpms_off:1; uint32_t dpms_off:1;
uint32_t gamut_remap:1; uint32_t gamut_remap:1;
uint32_t wb_update:1; uint32_t wb_update:1;
uint32_t dsc_changed : 1;
} bits; } bits;
uint32_t raw; uint32_t raw;

View File

@@ -229,7 +229,8 @@ struct dc_panel_patch {
unsigned int extra_t12_ms; unsigned int extra_t12_ms;
unsigned int extra_delay_backlight_off; unsigned int extra_delay_backlight_off;
unsigned int extra_t7_ms; unsigned int extra_t7_ms;
unsigned int manage_secondary_link; unsigned int skip_scdc_overwrite;
unsigned int delay_ignore_msa;
}; };
struct dc_edid_caps { struct dc_edid_caps {

View File

@@ -29,7 +29,7 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \ DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \ dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \ dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE)) AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))

View File

@@ -378,6 +378,11 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
const struct dc_config *config = &dmcu->ctx->dc->config; const struct dc_config *config = &dmcu->ctx->dc->config;
bool status = false; bool status = false;
struct dc_context *ctx = dmcu->ctx;
unsigned int i;
// 5 4 3 2 1 0
// F E D C B A - bit 0 is A, bit 5 is F
unsigned int tx_interrupt_mask = 0;
PERF_TRACE(); PERF_TRACE();
/* Definition of DC_DMCU_SCRATCH /* Definition of DC_DMCU_SCRATCH
@@ -387,6 +392,15 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
*/ */
dmcu->dmcu_state = REG_READ(DC_DMCU_SCRATCH); dmcu->dmcu_state = REG_READ(DC_DMCU_SCRATCH);
for (i = 0; i < ctx->dc->link_count; i++) {
if (ctx->dc->links[i]->link_enc->features.flags.bits.DP_IS_USB_C) {
if (ctx->dc->links[i]->link_enc->transmitter >= TRANSMITTER_UNIPHY_A &&
ctx->dc->links[i]->link_enc->transmitter <= TRANSMITTER_UNIPHY_F) {
tx_interrupt_mask |= 1 << ctx->dc->links[i]->link_enc->transmitter;
}
}
}
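Worked example (hypothetical board, and assuming TRANSMITTER_UNIPHY_A maps to bit 0 as the comment above states): if only the links on UNIPHY B and UNIPHY D are routed to USB-C, the loop leaves
    tx_interrupt_mask = (1 << TRANSMITTER_UNIPHY_B) | (1 << TRANSMITTER_UNIPHY_D);   /* 0b001010 = 0x0A */
which is then handed to the firmware through MASTER_COMM_DATA_REG3, presumably so the DMCU knows which transmitters are USB-C.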
switch (dmcu->dmcu_state) { switch (dmcu->dmcu_state) {
case DMCU_UNLOADED: case DMCU_UNLOADED:
status = false; status = false;
@@ -401,6 +415,8 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
/* Set backlight ramping stepsize */ /* Set backlight ramping stepsize */
REG_WRITE(MASTER_COMM_DATA_REG2, abm_gain_stepsize); REG_WRITE(MASTER_COMM_DATA_REG2, abm_gain_stepsize);
REG_WRITE(MASTER_COMM_DATA_REG3, tx_interrupt_mask);
/* Set command to initialize microcontroller */ /* Set command to initialize microcontroller */
REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
MCP_INIT_DMCU); MCP_INIT_DMCU);

File diff suppressed because it is too large

View File

@@ -0,0 +1,25 @@
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/

View File

@@ -27,25 +27,55 @@
#include "dc.h" #include "dc.h"
#include "dc_dmub_srv.h" #include "dc_dmub_srv.h"
#include "../../dmub/inc/dmub_srv.h" #include "../../dmub/inc/dmub_srv.h"
#include "dmub_fw_state.h" #include "../../dmub/inc/dmub_gpint_cmd.h"
#include "core_types.h" #include "core_types.h"
#include "ipp.h"
#define MAX_PIPES 6 #define MAX_PIPES 6
/** /**
* Get PSR state from firmware. * Get PSR state from firmware.
*/ */
static void dmub_get_psr_state(uint32_t *psr_state) static void dmub_psr_get_state(struct dmub_psr *dmub, uint32_t *psr_state)
{ {
// Not yet implemented struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
// Trigger GPINT interrupt from firmware
// Send gpint command and wait for ack
dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
dmub_srv_get_gpint_response(srv, psr_state);
}
/**
* Set PSR version.
*/
static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *stream)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
cmd.psr_set_version.header.type = DMUB_CMD__PSR;
cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION;
if (stream->psr_version == 0x0) // Unsupported
return false;
else if (stream->psr_version == 0x1)
cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_1;
else if (stream->psr_version == 0x2)
cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_2;
cmd.psr_enable.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
dc_dmub_srv_wait_idle(dc->dmub_srv);
return true;
} }
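Every firmware request in this file follows the same queue / execute / wait sequence; restating the set-version command above in that three-step shape (all identifiers taken from this diff, dc being the dc_context pointer used in the surrounding functions):
    union dmub_rb_cmd cmd = { 0 };
    cmd.psr_set_version.header.type = DMUB_CMD__PSR;
    cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION;
    cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
    cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_1;
    dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_set_version.header);   /* stage in the ring buffer */
    dc_dmub_srv_cmd_execute(dc->dmub_srv);                              /* kick the DMCUB           */
    dc_dmub_srv_wait_idle(dc->dmub_srv);                                /* block until it completes */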
/** /**
* Enable/Disable PSR. * Enable/Disable PSR.
*/ */
static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable) static void dmub_psr_enable(struct dmub_psr *dmub, bool enable)
{ {
union dmub_rb_cmd cmd; union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx; struct dc_context *dc = dmub->ctx;
@@ -67,13 +97,13 @@ static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable)
/** /**
* Set PSR level. * Set PSR level.
*/ */
static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level) static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
{ {
union dmub_rb_cmd cmd; union dmub_rb_cmd cmd;
uint32_t psr_state = 0; uint32_t psr_state = 0;
struct dc_context *dc = dmub->ctx; struct dc_context *dc = dmub->ctx;
dmub_get_psr_state(&psr_state); dmub_psr_get_state(dmub, &psr_state);
if (psr_state == 0) if (psr_state == 0)
return; return;
@@ -91,7 +121,7 @@ static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level)
/** /**
* Setup PSR by programming phy registers and sending psr hw context values to firmware. * Setup PSR by programming phy registers and sending psr hw context values to firmware.
*/ */
static bool dmub_setup_psr(struct dmub_psr *dmub, static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
struct dc_link *link, struct dc_link *link,
struct psr_context *psr_context) struct psr_context *psr_context)
{ {
@@ -101,8 +131,9 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
= &cmd.psr_copy_settings.psr_copy_settings_data; = &cmd.psr_copy_settings.psr_copy_settings_data;
struct pipe_ctx *pipe_ctx = NULL; struct pipe_ctx *pipe_ctx = NULL;
struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx; struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
int i = 0;
for (int i = 0; i < MAX_PIPES; i++) { for (i = 0; i < MAX_PIPES; i++) {
if (res_ctx && if (res_ctx &&
res_ctx->pipe_ctx[i].stream && res_ctx->pipe_ctx[i].stream &&
res_ctx->pipe_ctx[i].stream->link && res_ctx->pipe_ctx[i].stream->link &&
@@ -113,9 +144,11 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
} }
} }
if (!pipe_ctx || if (!pipe_ctx || !&pipe_ctx->plane_res || !&pipe_ctx->stream_res)
!&pipe_ctx->plane_res || return false;
!&pipe_ctx->stream_res)
// First, set the psr version
if (!dmub_psr_set_version(dmub, pipe_ctx->stream))
return false; return false;
// Program DP DPHY fast training registers // Program DP DPHY fast training registers
@@ -138,10 +171,6 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
copy_settings_data->mpcc_inst = pipe_ctx->plane_res.mpcc_inst; copy_settings_data->mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
if (pipe_ctx->plane_res.hubp)
copy_settings_data->hubp_inst = pipe_ctx->plane_res.hubp->inst;
else
copy_settings_data->hubp_inst = 0;
if (pipe_ctx->plane_res.dpp) if (pipe_ctx->plane_res.dpp)
copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst; copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
else else
@@ -157,18 +186,9 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
// Misc // Misc
copy_settings_data->psr_level = psr_context->psr_level.u32all; copy_settings_data->psr_level = psr_context->psr_level.u32all;
copy_settings_data->hyst_frames = psr_context->timehyst_frames;
copy_settings_data->hyst_lines = psr_context->hyst_lines;
copy_settings_data->phy_type = psr_context->phyType;
copy_settings_data->aux_repeat = psr_context->aux_repeats;
copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations; copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations;
copy_settings_data->skip_wait_for_pll_lock = psr_context->skipPsrWaitForPllLock;
copy_settings_data->frame_delay = psr_context->frame_delay; copy_settings_data->frame_delay = psr_context->frame_delay;
copy_settings_data->smu_phy_id = psr_context->smuPhyId;
copy_settings_data->num_of_controllers = psr_context->numberOfControllers;
copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq; copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq;
copy_settings_data->phy_num = psr_context->frame_delay & 0x7;
copy_settings_data->link_rate = psr_context->frame_delay & 0xF;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header); dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header);
dc_dmub_srv_cmd_execute(dc->dmub_srv); dc_dmub_srv_cmd_execute(dc->dmub_srv);
@@ -178,10 +198,10 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
} }
static const struct dmub_psr_funcs psr_funcs = { static const struct dmub_psr_funcs psr_funcs = {
.set_psr_enable = dmub_set_psr_enable, .psr_copy_settings = dmub_psr_copy_settings,
.setup_psr = dmub_setup_psr, .psr_enable = dmub_psr_enable,
.get_psr_state = dmub_get_psr_state, .psr_get_state = dmub_psr_get_state,
.set_psr_level = dmub_set_psr_level, .psr_set_level = dmub_psr_set_level,
}; };
/** /**

View File

@@ -27,6 +27,7 @@
#define _DMUB_PSR_H_ #define _DMUB_PSR_H_
#include "os_types.h" #include "os_types.h"
#include "dc_link.h"
struct dmub_psr { struct dmub_psr {
struct dc_context *ctx; struct dc_context *ctx;
@@ -34,14 +35,14 @@ struct dmub_psr {
}; };
struct dmub_psr_funcs { struct dmub_psr_funcs {
void (*set_psr_enable)(struct dmub_psr *dmub, bool enable); bool (*psr_copy_settings)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
bool (*setup_psr)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context); void (*psr_enable)(struct dmub_psr *dmub, bool enable);
void (*get_psr_state)(uint32_t *psr_state); void (*psr_get_state)(struct dmub_psr *dmub, uint32_t *psr_state);
void (*set_psr_level)(struct dmub_psr *dmub, uint16_t psr_level); void (*psr_set_level)(struct dmub_psr *dmub, uint16_t psr_level);
}; };
struct dmub_psr *dmub_psr_create(struct dc_context *ctx); struct dmub_psr *dmub_psr_create(struct dc_context *ctx);
void dmub_psr_destroy(struct dmub_psr **dmub); void dmub_psr_destroy(struct dmub_psr **dmub);
#endif /* _DCE_DMUB_H_ */ #endif /* _DMUB_PSR_H_ */

View File

@@ -71,6 +71,8 @@
#define PANEL_POWER_UP_TIMEOUT 300 #define PANEL_POWER_UP_TIMEOUT 300
#define PANEL_POWER_DOWN_TIMEOUT 500 #define PANEL_POWER_DOWN_TIMEOUT 500
#define HPD_CHECK_INTERVAL 10 #define HPD_CHECK_INTERVAL 10
#define OLED_POST_T7_DELAY 100
#define OLED_PRE_T11_DELAY 150
#define CTX \ #define CTX \
hws->ctx hws->ctx
@@ -696,8 +698,10 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
} }
/*todo: cloned in stream enc, fix*/ /*todo: cloned in stream enc, fix*/
static bool is_panel_backlight_on(struct dce_hwseq *hws) bool dce110_is_panel_backlight_on(struct dc_link *link)
{ {
struct dc_context *ctx = link->ctx;
struct dce_hwseq *hws = ctx->dc->hwseq;
uint32_t value; uint32_t value;
REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value); REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value);
@@ -705,11 +709,12 @@ static bool is_panel_backlight_on(struct dce_hwseq *hws)
return value; return value;
} }
static bool is_panel_powered_on(struct dce_hwseq *hws) bool dce110_is_panel_powered_on(struct dc_link *link)
{ {
struct dc_context *ctx = link->ctx;
struct dce_hwseq *hws = ctx->dc->hwseq;
uint32_t pwr_seq_state, dig_on, dig_on_ovrd; uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state); REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd); REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
@@ -816,7 +821,7 @@ void dce110_edp_power_control(
return; return;
} }
if (power_up != is_panel_powered_on(hwseq)) { if (power_up != hwseq->funcs.is_panel_powered_on(link)) {
/* Send VBIOS command to prompt eDP panel power */ /* Send VBIOS command to prompt eDP panel power */
if (power_up) { if (power_up) {
unsigned long long current_ts = dm_get_timestamp(ctx); unsigned long long current_ts = dm_get_timestamp(ctx);
@@ -896,7 +901,7 @@ void dce110_edp_backlight_control(
return; return;
} }
if (enable && is_panel_backlight_on(hws)) { if (enable && hws->funcs.is_panel_backlight_on(link)) {
DC_LOG_HW_RESUME_S3( DC_LOG_HW_RESUME_S3(
"%s: panel already powered up. Do nothing.\n", "%s: panel already powered up. Do nothing.\n",
__func__); __func__);
@@ -936,9 +941,21 @@ void dce110_edp_backlight_control(
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
edp_receiver_ready_T7(link); edp_receiver_ready_T7(link);
link_transmitter_control(ctx->dc_bios, &cntl); link_transmitter_control(ctx->dc_bios, &cntl);
if (enable && link->dpcd_sink_ext_caps.bits.oled)
msleep(OLED_POST_T7_DELAY);
if (link->dpcd_sink_ext_caps.bits.oled ||
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)
dc_link_backlight_enable_aux(link, enable);
/*edp 1.2*/ /*edp 1.2*/
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF) if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF)
edp_receiver_ready_T9(link); edp_receiver_ready_T9(link);
if (!enable && link->dpcd_sink_ext_caps.bits.oled)
msleep(OLED_PRE_T11_DELAY);
} }
void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
@@ -2574,17 +2591,6 @@ static void dce110_apply_ctx_for_surface(
if (dc->fbc_compressor) if (dc->fbc_compressor)
dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor); dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
if (stream == pipe_ctx->stream) {
if (!pipe_ctx->top_pipe &&
(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++) { for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -2607,20 +2613,16 @@ static void dce110_apply_ctx_for_surface(
} }
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
if ((stream == pipe_ctx->stream) &&
(!pipe_ctx->top_pipe) &&
(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
if (dc->fbc_compressor) if (dc->fbc_compressor)
enable_fbc(dc, context); enable_fbc(dc, context);
} }
static void dce110_post_unlock_program_front_end(
struct dc *dc,
struct dc_state *context)
{
}
static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx) static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
{ {
struct dce_hwseq *hws = dc->hwseq; struct dce_hwseq *hws = dc->hwseq;
@@ -2722,6 +2724,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.init_hw = init_hw, .init_hw = init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dce110_apply_ctx_for_surface, .apply_ctx_for_surface = dce110_apply_ctx_for_surface,
.post_unlock_program_front_end = dce110_post_unlock_program_front_end,
.update_plane_addr = update_plane_addr, .update_plane_addr = update_plane_addr,
.update_pending_status = dce110_update_pending_status, .update_pending_status = dce110_update_pending_status,
.enable_accelerated_mode = dce110_enable_accelerated_mode, .enable_accelerated_mode = dce110_enable_accelerated_mode,
@@ -2736,6 +2739,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.disable_audio_stream = dce110_disable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dce110_power_down_fe, .disable_plane = dce110_power_down_fe,
.pipe_control_lock = dce_pipe_control_lock, .pipe_control_lock = dce_pipe_control_lock,
.interdependent_update_lock = NULL,
.prepare_bandwidth = dce110_prepare_bandwidth, .prepare_bandwidth = dce110_prepare_bandwidth,
.optimize_bandwidth = dce110_optimize_bandwidth, .optimize_bandwidth = dce110_optimize_bandwidth,
.set_drr = set_drr, .set_drr = set_drr,
@@ -2763,6 +2767,8 @@ static const struct hwseq_private_funcs dce110_private_funcs = {
.disable_stream_gating = NULL, .disable_stream_gating = NULL,
.enable_stream_gating = NULL, .enable_stream_gating = NULL,
.edp_backlight_control = dce110_edp_backlight_control, .edp_backlight_control = dce110_edp_backlight_control,
.is_panel_backlight_on = dce110_is_panel_backlight_on,
.is_panel_powered_on = dce110_is_panel_powered_on,
}; };
void dce110_hw_sequencer_construct(struct dc *dc) void dce110_hw_sequencer_construct(struct dc *dc)

View File

@@ -85,5 +85,9 @@ void dce110_edp_wait_for_hpd_ready(
struct dc_link *link, struct dc_link *link,
bool power_up); bool power_up);
bool dce110_is_panel_backlight_on(struct dc_link *link);
bool dce110_is_panel_powered_on(struct dc_link *link);
#endif /* __DC_HWSS_DCE110_H__ */ #endif /* __DC_HWSS_DCE110_H__ */

View File

@@ -316,6 +316,7 @@ bool cm_helper_translate_curve_to_hw_format(
struct pwl_result_data *rgb_resulted; struct pwl_result_data *rgb_resulted;
struct pwl_result_data *rgb; struct pwl_result_data *rgb;
struct pwl_result_data *rgb_plus_1; struct pwl_result_data *rgb_plus_1;
struct pwl_result_data *rgb_minus_1;
int32_t region_start, region_end; int32_t region_start, region_end;
int32_t i; int32_t i;
@@ -465,9 +466,20 @@ bool cm_helper_translate_curve_to_hw_format(
rgb = rgb_resulted; rgb = rgb_resulted;
rgb_plus_1 = rgb_resulted + 1; rgb_plus_1 = rgb_resulted + 1;
rgb_minus_1 = rgb;
i = 1; i = 1;
while (i != hw_points + 1) { while (i != hw_points + 1) {
if (i >= hw_points - 1) {
if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
rgb_plus_1->red = dc_fixpt_add(rgb->red, rgb_minus_1->delta_red);
if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
rgb_plus_1->green = dc_fixpt_add(rgb->green, rgb_minus_1->delta_green);
if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
rgb_plus_1->blue = dc_fixpt_add(rgb->blue, rgb_minus_1->delta_blue);
}
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red); rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue); rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
@@ -482,6 +494,7 @@ bool cm_helper_translate_curve_to_hw_format(
} }
++rgb_plus_1; ++rgb_plus_1;
rgb_minus_1 = rgb;
++rgb; ++rgb;
++i; ++i;
} }
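A worked instance of the new endpoint clamp (numbers are illustrative): suppose near the top of the curve rgb->red is 0.980, the previous segment's delta_red is 0.010, and rounding in the conversion left rgb_plus_1->red at 0.975. The check
    if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))                              /* 0.975 < 0.980 */
            rgb_plus_1->red = dc_fixpt_add(rgb->red, rgb_minus_1->delta_red); /* -> 0.990     */
rewrites the point so delta_red stays non-negative and the programmed PWL segment cannot dip at the end; green and blue are handled the same way.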

View File

@@ -128,8 +128,8 @@ bool hubbub1_verify_allow_pstate_change_high(
* pstate takes around ~100us on linux. Unknown currently as to * pstate takes around ~100us on linux. Unknown currently as to
* why it takes that long on linux * why it takes that long on linux
*/ */
static unsigned int pstate_wait_timeout_us = 200; const unsigned int pstate_wait_timeout_us = 200;
static unsigned int pstate_wait_expected_timeout_us = 40; const unsigned int pstate_wait_expected_timeout_us = 40;
static unsigned int max_sampled_pstate_wait_us; /* data collection */ static unsigned int max_sampled_pstate_wait_us; /* data collection */
static bool forced_pstate_allow; /* help with revert wa */ static bool forced_pstate_allow; /* help with revert wa */
@@ -147,8 +147,9 @@ bool hubbub1_verify_allow_pstate_change_high(
forced_pstate_allow = false; forced_pstate_allow = false;
} }
/* RV2: /* The following table only applies to DCN1 and DCN2,
* dchubbubdebugind, at: 0xB * for newer DCNs, need to consult with HW IP folks to read RTL
* HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
* description * description
* 0: Pipe0 Plane0 Allow Pstate Change * 0: Pipe0 Plane0 Allow Pstate Change
* 1: Pipe0 Plane1 Allow Pstate Change * 1: Pipe0 Plane1 Allow Pstate Change
@@ -181,64 +182,6 @@ bool hubbub1_verify_allow_pstate_change_high(
* 28: WB0 Allow Pstate Change * 28: WB0 Allow Pstate Change
* 29: WB1 Allow Pstate Change * 29: WB1 Allow Pstate Change
* 30: Arbiter's allow_pstate_change * 30: Arbiter's allow_pstate_change
* 31: SOC pstate change request"
*/
/*DCN2.x:
HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
0: Pipe0 Plane0 Allow P-state Change
1: Pipe0 Plane1 Allow P-state Change
2: Pipe0 Cursor0 Allow P-state Change
3: Pipe0 Cursor1 Allow P-state Change
4: Pipe1 Plane0 Allow P-state Change
5: Pipe1 Plane1 Allow P-state Change
6: Pipe1 Cursor0 Allow P-state Change
7: Pipe1 Cursor1 Allow P-state Change
8: Pipe2 Plane0 Allow P-state Change
9: Pipe2 Plane1 Allow P-state Change
10: Pipe2 Cursor0 Allow P-state Change
11: Pipe2 Cursor1 Allow P-state Change
12: Pipe3 Plane0 Allow P-state Change
13: Pipe3 Plane1 Allow P-state Change
14: Pipe3 Cursor0 Allow P-state Change
15: Pipe3 Cursor1 Allow P-state Change
16: Pipe4 Plane0 Allow P-state Change
17: Pipe4 Plane1 Allow P-state Change
18: Pipe4 Cursor0 Allow P-state Change
19: Pipe4 Cursor1 Allow P-state Change
20: Pipe5 Plane0 Allow P-state Change
21: Pipe5 Plane1 Allow P-state Change
22: Pipe5 Cursor0 Allow P-state Change
23: Pipe5 Cursor1 Allow P-state Change
24: Pipe6 Plane0 Allow P-state Change
25: Pipe6 Plane1 Allow P-state Change
26: Pipe6 Cursor0 Allow P-state Change
27: Pipe6 Cursor1 Allow P-state Change
28: WB0 Allow P-state Change
29: WB1 Allow P-state Change
30: Arbiter`s Allow P-state Change
31: SOC P-state Change request
*/
/* RV1:
* dchubbubdebugind, at: 0x7
* description "3-0: Pipe0 cursor0 QOS
* 7-4: Pipe1 cursor0 QOS
* 11-8: Pipe2 cursor0 QOS
* 15-12: Pipe3 cursor0 QOS
* 16: Pipe0 Plane0 Allow Pstate Change
* 17: Pipe1 Plane0 Allow Pstate Change
* 18: Pipe2 Plane0 Allow Pstate Change
* 19: Pipe3 Plane0 Allow Pstate Change
* 20: Pipe0 Plane1 Allow Pstate Change
* 21: Pipe1 Plane1 Allow Pstate Change
* 22: Pipe2 Plane1 Allow Pstate Change
* 23: Pipe3 Plane1 Allow Pstate Change
* 24: Pipe0 cursor0 Allow Pstate Change
* 25: Pipe1 cursor0 Allow Pstate Change
* 26: Pipe2 cursor0 Allow Pstate Change
* 27: Pipe3 cursor0 Allow Pstate Change
* 28: WB0 Allow Pstate Change
* 29: WB1 Allow Pstate Change
* 30: Arbiter's allow_pstate_change
* 31: SOC pstate change request * 31: SOC pstate change request
*/ */
@@ -300,7 +243,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1); DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
} }
void hubbub1_program_urgent_watermarks( bool hubbub1_program_urgent_watermarks(
struct hubbub *hubbub, struct hubbub *hubbub,
struct dcn_watermark_set *watermarks, struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz, unsigned int refclk_mhz,
@@ -308,6 +251,7 @@ void hubbub1_program_urgent_watermarks(
{ {
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t prog_wm_value; uint32_t prog_wm_value;
bool wm_pending = false;
/* Repeat for water mark set A, B, C and D. */ /* Repeat for water mark set A, B, C and D. */
/* clock state A */ /* clock state A */
@@ -321,7 +265,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n", "HW register value = 0x%x\n",
watermarks->a.urgent_ns, prog_wm_value); watermarks->a.urgent_ns, prog_wm_value);
} } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
wm_pending = true;
if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) { if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns; hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
@@ -331,7 +276,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n", "HW register value = 0x%x\n",
watermarks->a.pte_meta_urgent_ns, prog_wm_value); watermarks->a.pte_meta_urgent_ns, prog_wm_value);
} } else if (watermarks->a.pte_meta_urgent_ns < hubbub1->watermarks.a.pte_meta_urgent_ns)
wm_pending = true;
/* clock state B */ /* clock state B */
if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) { if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
@@ -344,7 +290,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n", "HW register value = 0x%x\n",
watermarks->b.urgent_ns, prog_wm_value); watermarks->b.urgent_ns, prog_wm_value);
} } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
wm_pending = true;
if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) { if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns; hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
@@ -354,7 +301,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n", "HW register value = 0x%x\n",
watermarks->b.pte_meta_urgent_ns, prog_wm_value); watermarks->b.pte_meta_urgent_ns, prog_wm_value);
} } else if (watermarks->b.pte_meta_urgent_ns < hubbub1->watermarks.b.pte_meta_urgent_ns)
wm_pending = true;
/* clock state C */ /* clock state C */
if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) { if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
@@ -367,7 +315,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n", "HW register value = 0x%x\n",
watermarks->c.urgent_ns, prog_wm_value); watermarks->c.urgent_ns, prog_wm_value);
} } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
wm_pending = true;
if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) { if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns; hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
@@ -377,7 +326,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n", "HW register value = 0x%x\n",
watermarks->c.pte_meta_urgent_ns, prog_wm_value); watermarks->c.pte_meta_urgent_ns, prog_wm_value);
} } else if (watermarks->c.pte_meta_urgent_ns < hubbub1->watermarks.c.pte_meta_urgent_ns)
wm_pending = true;
/* clock state D */ /* clock state D */
if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) { if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
@@ -390,7 +340,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n", "HW register value = 0x%x\n",
watermarks->d.urgent_ns, prog_wm_value); watermarks->d.urgent_ns, prog_wm_value);
} } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
wm_pending = true;
if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) { if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns; hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
@@ -400,10 +351,13 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n", "HW register value = 0x%x\n",
watermarks->d.pte_meta_urgent_ns, prog_wm_value); watermarks->d.pte_meta_urgent_ns, prog_wm_value);
} } else if (watermarks->d.pte_meta_urgent_ns < hubbub1->watermarks.d.pte_meta_urgent_ns)
wm_pending = true;
return wm_pending;
} }
void hubbub1_program_stutter_watermarks( bool hubbub1_program_stutter_watermarks(
struct hubbub *hubbub, struct hubbub *hubbub,
struct dcn_watermark_set *watermarks, struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz, unsigned int refclk_mhz,
@@ -411,6 +365,7 @@ void hubbub1_program_stutter_watermarks(
{ {
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t prog_wm_value; uint32_t prog_wm_value;
bool wm_pending = false;
/* clock state A */ /* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
@@ -425,7 +380,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n", "HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
} } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
< hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
wm_pending = true;
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) { > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
@@ -439,7 +396,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n", "HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value); watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
} } else if (watermarks->a.cstate_pstate.cstate_exit_ns
< hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
wm_pending = true;
/* clock state B */ /* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
@@ -454,7 +413,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n", "HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
} } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
< hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
wm_pending = true;
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) { > hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
@@ -468,7 +429,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n", "HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value); watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
} } else if (watermarks->b.cstate_pstate.cstate_exit_ns
< hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
wm_pending = true;
/* clock state C */ /* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
@@ -483,7 +446,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n", "HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
} } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
< hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
wm_pending = true;
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) { > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
@@ -497,7 +462,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n", "HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
} } else if (watermarks->c.cstate_pstate.cstate_exit_ns
< hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
wm_pending = true;
/* clock state D */ /* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
@@ -512,7 +479,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n", "HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
} } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
< hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
wm_pending = true;
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) { > hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
@@ -526,11 +495,14 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n", "HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value); watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
} } else if (watermarks->d.cstate_pstate.cstate_exit_ns
< hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
wm_pending = true;
return wm_pending;
} }
void hubbub1_program_pstate_watermarks( bool hubbub1_program_pstate_watermarks(
struct hubbub *hubbub, struct hubbub *hubbub,
struct dcn_watermark_set *watermarks, struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz, unsigned int refclk_mhz,
@@ -538,6 +510,7 @@ void hubbub1_program_pstate_watermarks(
{ {
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t prog_wm_value; uint32_t prog_wm_value;
bool wm_pending = false;
/* clock state A */ /* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
@@ -552,7 +525,9 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n", "HW register value = 0x%x\n\n",
watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value); watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
} } else if (watermarks->a.cstate_pstate.pstate_change_ns
< hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
wm_pending = true;
/* clock state B */ /* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
@@ -567,7 +542,9 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n", "HW register value = 0x%x\n\n",
watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value); watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
} } else if (watermarks->b.cstate_pstate.pstate_change_ns
< hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
wm_pending = true;
/* clock state C */ /* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
@@ -582,7 +559,9 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n\n", "HW register value = 0x%x\n\n",
watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
} } else if (watermarks->c.cstate_pstate.pstate_change_ns
< hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
wm_pending = true;
/* clock state D */ /* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
@@ -597,23 +576,33 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n\n", "HW register value = 0x%x\n\n",
watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value); watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
} } else if (watermarks->d.cstate_pstate.pstate_change_ns
< hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
wm_pending = true;
return wm_pending;
} }
void hubbub1_program_watermarks( bool hubbub1_program_watermarks(
struct hubbub *hubbub, struct hubbub *hubbub,
struct dcn_watermark_set *watermarks, struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz, unsigned int refclk_mhz,
bool safe_to_lower) bool safe_to_lower)
{ {
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
bool wm_pending = false;
/* /*
* Need to clamp to max of the register values (i.e. no wrap) * Need to clamp to max of the register values (i.e. no wrap)
* for dcn1, all wm registers are 21-bit wide * for dcn1, all wm registers are 21-bit wide
*/ */
hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower); if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower); wm_pending = true;
hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
wm_pending = true;
if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
wm_pending = true;
REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL, REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
@@ -627,6 +616,7 @@ void hubbub1_program_watermarks(
DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1, DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1); DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
#endif #endif
return wm_pending;
} }
void hubbub1_update_dchub( void hubbub1_update_dchub(
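The hunks above change the hubbub1 watermark programmers from void to bool: each register is still written only when safe_to_lower is set or the new value raises the watermark, and the function now reports whether a lowering request had to be deferred (wm_pending). Below is a minimal standalone sketch of that contract; wm_cache, program_one_wm and the example values are hypothetical stand-ins, not the DC API.

/*
 * Sketch of the safe_to_lower / wm_pending contract introduced above.
 * Stand-in types only; not the driver's structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct wm_cache { unsigned int urgent_ns; };

/* Raise immediately; defer lowering until the caller says it is safe. */
static bool program_one_wm(struct wm_cache *cache, unsigned int new_ns,
                           bool safe_to_lower)
{
    bool wm_pending = false;

    if (safe_to_lower || new_ns > cache->urgent_ns) {
        cache->urgent_ns = new_ns;      /* the register write in HW */
        printf("programmed %u ns\n", new_ns);
    } else if (new_ns < cache->urgent_ns) {
        wm_pending = true;              /* lowering still outstanding */
    }

    return wm_pending;
}

int main(void)
{
    struct wm_cache cache = { .urgent_ns = 400 };

    /* first pass: lowering deferred, pending flag comes back set */
    bool pending = program_one_wm(&cache, 250, false);

    /* second pass: lowering now permitted, register gets the new value */
    if (pending)
        program_one_wm(&cache, 250, true);

    return 0;
}

Later hunks in this diff store the aggregated result in dc->wm_optimized_required so that optimize_bandwidth only reprograms watermarks when something is actually pending.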


@@ -308,7 +308,7 @@ bool hubbub1_verify_allow_pstate_change_high(
void hubbub1_wm_change_req_wa(struct hubbub *hubbub); void hubbub1_wm_change_req_wa(struct hubbub *hubbub);
void hubbub1_program_watermarks( bool hubbub1_program_watermarks(
struct hubbub *hubbub, struct hubbub *hubbub,
struct dcn_watermark_set *watermarks, struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz, unsigned int refclk_mhz,
@@ -331,17 +331,17 @@ void hubbub1_construct(struct hubbub *hubbub,
const struct dcn_hubbub_shift *hubbub_shift, const struct dcn_hubbub_shift *hubbub_shift,
const struct dcn_hubbub_mask *hubbub_mask); const struct dcn_hubbub_mask *hubbub_mask);
void hubbub1_program_urgent_watermarks( bool hubbub1_program_urgent_watermarks(
struct hubbub *hubbub, struct hubbub *hubbub,
struct dcn_watermark_set *watermarks, struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz, unsigned int refclk_mhz,
bool safe_to_lower); bool safe_to_lower);
void hubbub1_program_stutter_watermarks( bool hubbub1_program_stutter_watermarks(
struct hubbub *hubbub, struct hubbub *hubbub,
struct dcn_watermark_set *watermarks, struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz, unsigned int refclk_mhz,
bool safe_to_lower); bool safe_to_lower);
void hubbub1_program_pstate_watermarks( bool hubbub1_program_pstate_watermarks(
struct hubbub *hubbub, struct hubbub *hubbub,
struct dcn_watermark_set *watermarks, struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz, unsigned int refclk_mhz,


@@ -48,8 +48,8 @@
#include "dc_link_dp.h" #include "dc_link_dp.h"
#include "dccg.h" #include "dccg.h"
#include "clk_mgr.h" #include "clk_mgr.h"
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dsc.h" #include "dsc.h"
#define DC_LOGGER_INIT(logger) #define DC_LOGGER_INIT(logger)
@@ -82,7 +82,7 @@ void print_microsec(struct dc_context *dc_ctx,
us_x10 % frac); us_x10 % frac);
} }
static void dcn10_lock_all_pipes(struct dc *dc, void dcn10_lock_all_pipes(struct dc *dc,
struct dc_state *context, struct dc_state *context,
bool lock) bool lock)
{ {
@@ -93,6 +93,7 @@ static void dcn10_lock_all_pipes(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) { for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe_ctx = &context->res_ctx.pipe_ctx[i]; pipe_ctx = &context->res_ctx.pipe_ctx[i];
tg = pipe_ctx->stream_res.tg; tg = pipe_ctx->stream_res.tg;
/* /*
* Only lock the top pipe's tg to prevent redundant * Only lock the top pipe's tg to prevent redundant
* (un)locking. Also skip if pipe is disabled. * (un)locking. Also skip if pipe is disabled.
@@ -103,9 +104,9 @@ static void dcn10_lock_all_pipes(struct dc *dc,
continue; continue;
if (lock) if (lock)
tg->funcs->lock(tg); dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
else else
tg->funcs->unlock(tg); dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
} }
} }
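The change above makes dcn10_lock_all_pipes non-static and routes each (un)lock through dc->hwss.pipe_control_lock instead of calling tg->funcs->lock directly, presumably so other hw sequencer implementations can supply their own pipe locking. A small sketch of that vtable indirection, with hypothetical stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct pipe { int idx; };

struct hw_sequencer_funcs {
    void (*pipe_control_lock)(struct pipe *pipe, bool lock);
};

static void dcn10_pipe_lock(struct pipe *pipe, bool lock)
{
    printf("dcn10 %s pipe %d\n", lock ? "lock" : "unlock", pipe->idx);
}

/* The real loop also filters to top pipes with an enabled stream. */
static void lock_all_pipes(struct hw_sequencer_funcs *hwss,
                           struct pipe *pipes, int count, bool lock)
{
    for (int i = 0; i < count; i++)
        hwss->pipe_control_lock(&pipes[i], lock);
}

int main(void)
{
    struct hw_sequencer_funcs hwss = { .pipe_control_lock = dcn10_pipe_lock };
    struct pipe pipes[2] = { { 0 }, { 1 } };

    lock_all_pipes(&hwss, pipes, 2, true);
    /* ...program pipes... */
    lock_all_pipes(&hwss, pipes, 2, false);
    return 0;
}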
@@ -900,6 +901,10 @@ static void dcn10_reset_back_end_for_pipe(
* parent pipe. * parent pipe.
*/ */
if (pipe_ctx->top_pipe == NULL) { if (pipe_ctx->top_pipe == NULL) {
if (pipe_ctx->stream_res.abm)
pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg); pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false); pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
@@ -1043,7 +1048,7 @@ void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
if (opp != NULL) if (opp != NULL)
opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
dc->optimized_required = true; dc->clk_optimized_required = true;
if (hubp->funcs->hubp_disconnect) if (hubp->funcs->hubp_disconnect)
hubp->funcs->hubp_disconnect(hubp); hubp->funcs->hubp_disconnect(hubp);
@@ -1094,7 +1099,7 @@ void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
false); false);
hubp->power_gated = true; hubp->power_gated = true;
dc->optimized_required = false; /* We're powering off, no need to optimize */ dc->clk_optimized_required = false; /* We're powering off, no need to optimize */
hws->funcs.plane_atomic_power_down(dc, hws->funcs.plane_atomic_power_down(dc,
pipe_ctx->plane_res.dpp, pipe_ctx->plane_res.dpp,
@@ -1263,6 +1268,7 @@ void dcn10_init_hw(struct dc *dc)
} }
//Enable ability to power gate / don't force power on permanently //Enable ability to power gate / don't force power on permanently
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(hws, true); hws->funcs.enable_power_gating_plane(hws, true);
return; return;
@@ -1317,6 +1323,31 @@ void dcn10_init_hw(struct dc *dc)
if (hws->funcs.dsc_pg_control != NULL) if (hws->funcs.dsc_pg_control != NULL)
hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
/* we want to turn off all dp displays before doing detection */
if (dc->config.power_down_display_on_boot) {
uint8_t dpcd_power_state = '\0';
enum dc_status status = DC_ERROR_UNEXPECTED;
for (i = 0; i < dc->link_count; i++) {
if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
continue;
/*
* core_link_read_dpcd() will invoke dm_helpers_dp_read_dpcd(),
* which needs to read dpcd info with the help of aconnector.
* If aconnector (dc->links[i]->priv) is NULL, then dpcd status
* cannot be read.
*/
if (dc->links[i]->priv) {
/* if any of the displays are lit up turn them off */
status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
&dpcd_power_state, sizeof(dpcd_power_state));
if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
dp_receiver_power_ctrl(dc->links[i], false);
}
}
}
/* If taking control over from VBIOS, we may want to optimize our first /* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which * mode set, so we need to skip powering down pipes until we know which
* pipes we want to use. * pipes we want to use.
@@ -1355,7 +1386,7 @@ void dcn10_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
} }
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true); hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (dc->clk_mgr->funcs->notify_wm_ranges) if (dc->clk_mgr->funcs->notify_wm_ranges)
@@ -1576,7 +1607,7 @@ void dcn10_pipe_control_lock(
/* use TG master update lock to lock everything on the TG /* use TG master update lock to lock everything on the TG
* therefore only top pipe need to lock * therefore only top pipe need to lock
*/ */
if (pipe->top_pipe) if (!pipe || pipe->top_pipe)
return; return;
if (dc->debug.sanity_checks) if (dc->debug.sanity_checks)
@@ -2090,6 +2121,10 @@ void dcn10_get_hdr_visual_confirm_color(
if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) { if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
/* HDR10, ARGB2101010 - set border color to red */ /* HDR10, ARGB2101010 - set border color to red */
color->color_r_cr = color_value; color->color_r_cr = color_value;
} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
/* FreeSync 2 ARGB2101010 - set border color to pink */
color->color_r_cr = color_value;
color->color_b_cb = color_value;
} }
break; break;
case PIXEL_FORMAT_FP16: case PIXEL_FORMAT_FP16:
@@ -2512,12 +2547,17 @@ void dcn10_apply_ctx_for_surface(
int i; int i;
struct timing_generator *tg; struct timing_generator *tg;
uint32_t underflow_check_delay_us; uint32_t underflow_check_delay_us;
bool removed_pipe[4] = { false };
bool interdependent_update = false; bool interdependent_update = false;
struct pipe_ctx *top_pipe_to_program = struct pipe_ctx *top_pipe_to_program =
dcn10_find_top_pipe_for_stream(dc, context, stream); dcn10_find_top_pipe_for_stream(dc, context, stream);
DC_LOGGER_INIT(dc->ctx->logger); DC_LOGGER_INIT(dc->ctx->logger);
// Clear pipe_ctx flag
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
pipe_ctx->update_flags.raw = 0;
}
if (!top_pipe_to_program) if (!top_pipe_to_program)
return; return;
@@ -2531,11 +2571,6 @@ void dcn10_apply_ctx_for_surface(
if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur) if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program)); ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
if (interdependent_update)
dcn10_lock_all_pipes(dc, context, true);
else
dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
if (underflow_check_delay_us != 0xFFFFFFFF) if (underflow_check_delay_us != 0xFFFFFFFF)
udelay(underflow_check_delay_us); udelay(underflow_check_delay_us);
@@ -2552,18 +2587,6 @@ void dcn10_apply_ctx_for_surface(
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe_ctx = struct pipe_ctx *old_pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i]; &dc->current_state->res_ctx.pipe_ctx[i];
/*
* Powergate reused pipes that are not powergated
* fairly hacky right now, using opp_id as indicator
* TODO: After move dc_post to dc_update, this will
* be removed.
*/
if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
if (old_pipe_ctx->stream_res.tg == tg &&
old_pipe_ctx->plane_res.hubp &&
old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
dc->hwss.disable_plane(dc, old_pipe_ctx);
}
if ((!pipe_ctx->plane_state || if ((!pipe_ctx->plane_state ||
pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) && pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
@@ -2571,7 +2594,7 @@ void dcn10_apply_ctx_for_surface(
old_pipe_ctx->stream_res.tg == tg) { old_pipe_ctx->stream_res.tg == tg) {
hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx); hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
removed_pipe[i] = true; pipe_ctx->update_flags.bits.disable = 1;
DC_LOG_DC("Reset mpcc for pipe %d\n", DC_LOG_DC("Reset mpcc for pipe %d\n",
old_pipe_ctx->pipe_idx); old_pipe_ctx->pipe_idx);
@@ -2597,21 +2620,35 @@ void dcn10_apply_ctx_for_surface(
&pipe_ctx->dlg_regs, &pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs); &pipe_ctx->ttu_regs);
} }
}
if (interdependent_update) void dcn10_post_unlock_program_front_end(
dcn10_lock_all_pipes(dc, context, false); struct dc *dc,
else struct dc_state *context)
dcn10_pipe_control_lock(dc, top_pipe_to_program, false); {
int i;
if (num_planes == 0) DC_LOGGER_INIT(dc->ctx->logger);
false_optc_underflow_wa(dc, stream, tg);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (!pipe_ctx->top_pipe &&
!pipe_ctx->prev_odm_pipe &&
pipe_ctx->stream) {
struct timing_generator *tg = pipe_ctx->stream_res.tg;
if (context->stream_status[i].plane_count == 0)
false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++) for (i = 0; i < dc->res_pool->pipe_count; i++)
if (removed_pipe[i]) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
for (i = 0; i < dc->res_pool->pipe_count; i++) for (i = 0; i < dc->res_pool->pipe_count; i++)
if (removed_pipe[i]) { if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
dc->hwss.optimize_bandwidth(dc, context); dc->hwss.optimize_bandwidth(dc, context);
break; break;
} }
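The hunks above replace the local removed_pipe[] bookkeeping with a per-pipe update_flags.bits.disable bit and move the post-unlock work into the new dcn10_post_unlock_program_front_end(). A standalone sketch of that mark-then-act split follows; the types and helper names are hypothetical stand-ins, not the real pipe_ctx/dc structures.

#include <stdbool.h>
#include <stdio.h>

#define PIPE_COUNT 4

struct pipe {
    bool has_plane;
    union {
        struct { unsigned int disable : 1; } bits;
        unsigned int raw;
    } update_flags;
};

static void apply_ctx_for_surface(struct pipe *pipes, const bool *keep)
{
    /* under pipe lock: clear the flags, then only mark pipes to drop */
    for (int i = 0; i < PIPE_COUNT; i++) {
        pipes[i].update_flags.raw = 0;
        if (pipes[i].has_plane && !keep[i])
            pipes[i].update_flags.bits.disable = 1;
    }
}

static void post_unlock_program_front_end(struct pipe *pipes)
{
    /* after unlock: act on the flags recorded above */
    for (int i = 0; i < PIPE_COUNT; i++)
        if (pipes[i].update_flags.bits.disable) {
            pipes[i].has_plane = false;
            printf("pipe %d disabled\n", i);
        }
}

int main(void)
{
    struct pipe pipes[PIPE_COUNT] = {
        { .has_plane = true }, { .has_plane = true },
    };
    const bool keep[PIPE_COUNT] = { true, false };

    apply_ctx_for_surface(pipes, keep);
    /* ...pipes unlocked here... */
    post_unlock_program_front_end(pipes);
    return 0;
}

Keeping the disable work in the unlocked pass means nothing is powered down while the pipes are still locked, which is what the split into a separate front-end function appears to be for.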
@@ -2656,7 +2693,7 @@ void dcn10_prepare_bandwidth(
false); false);
} }
hubbub->funcs->program_watermarks(hubbub, dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks, &context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true); true);
@@ -2680,19 +2717,30 @@ void dcn10_optimize_bandwidth(
hws->funcs.verify_allow_pstate_change_high(dc); hws->funcs.verify_allow_pstate_change_high(dc);
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (context->stream_count == 0) if (context->stream_count == 0) {
context->bw_ctx.bw.dcn.clk.phyclk_khz = 0; context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
dc->clk_mgr->funcs->update_clocks( dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr, dc->clk_mgr,
context, context,
true); true);
} else if (dc->clk_optimized_required || IS_DIAG_DC(dc->ctx->dce_environment)) {
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
true);
}
} }
if (dc->wm_optimized_required || IS_DIAG_DC(dc->ctx->dce_environment)) {
hubbub->funcs->program_watermarks(hubbub, hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks, &context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true); true);
}
dc->clk_optimized_required = false;
dc->wm_optimized_required = false;
dcn10_stereo_hw_frame_pack_wa(dc, context); dcn10_stereo_hw_frame_pack_wa(dc, context);
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
@@ -2884,6 +2932,7 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct timing_generator *tg = pipe_ctx->stream_res.tg; struct timing_generator *tg = pipe_ctx->stream_res.tg;
bool flip_pending; bool flip_pending;
struct dc *dc = plane_state->ctx->dc;
if (plane_state == NULL) if (plane_state == NULL)
return; return;
@@ -2901,6 +2950,19 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
plane_state->status.is_right_eye = plane_state->status.is_right_eye =
!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg); !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
} }
if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
struct dce_hwseq *hwseq = dc->hwseq;
struct timing_generator *tg = dc->res_pool->timing_generators[0];
unsigned int cur_frame = tg->funcs->get_frame_count(tg);
if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
struct hubbub *hubbub = dc->res_pool->hubbub;
hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
}
}
} }
void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
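The block added above arms a one-shot clear of the multi-plane-transition workaround: once the frame counter has moved past the frame on which self refresh was disallowed, stutter is re-allowed (unless disable_stutter is set in the debug options) and the flag is dropped. A standalone sketch of that frame-gated clear, using hypothetical stand-in state rather than the real hwseq/hubbub structures:

#include <stdbool.h>
#include <stdio.h>

struct wa_state {
    bool disallow_self_refresh_applied;
    unsigned int applied_on_frame;
};

static void allow_self_refresh(bool allow)
{
    printf("self refresh %s\n", allow ? "allowed" : "disallowed");
}

/* Called from the flip/pending-status path once per check. */
static void maybe_clear_wa(struct wa_state *wa, unsigned int cur_frame,
                           bool stutter_disabled_by_debug)
{
    if (!wa->disallow_self_refresh_applied)
        return;

    /* Still on the frame that needed the workaround: keep it applied. */
    if (cur_frame == wa->applied_on_frame)
        return;

    allow_self_refresh(!stutter_disabled_by_debug);
    wa->disallow_self_refresh_applied = false;
}

int main(void)
{
    struct wa_state wa = {
        .disallow_self_refresh_applied = true,
        .applied_on_frame = 100,
    };

    maybe_clear_wa(&wa, 100, false); /* same frame: nothing happens */
    maybe_clear_wa(&wa, 101, false); /* next frame: re-allow self refresh */
    return 0;
}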


@@ -70,11 +70,18 @@ void dcn10_reset_hw_ctx_wrap(
struct dc *dc, struct dc *dc,
struct dc_state *context); struct dc_state *context);
void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn10_lock_all_pipes(
struct dc *dc,
struct dc_state *context,
bool lock);
void dcn10_apply_ctx_for_surface( void dcn10_apply_ctx_for_surface(
struct dc *dc, struct dc *dc,
const struct dc_stream_state *stream, const struct dc_stream_state *stream,
int num_planes, int num_planes,
struct dc_state *context); struct dc_state *context);
void dcn10_post_unlock_program_front_end(
struct dc *dc,
struct dc_state *context);
void dcn10_hubp_pg_control( void dcn10_hubp_pg_control(
struct dce_hwseq *hws, struct dce_hwseq *hws,
unsigned int hubp_inst, unsigned int hubp_inst,

Some files were not shown because too many files have changed in this diff