drm fixes for 5.16-rc8
nouveau:
- fencing regression fix

i915:
- Fix possible uninitialized variable
- Fix composite fence seqno increment on each fence creation

amdgpu:
- Fencing fix
- XGMI fix
- VCN regression fix
- IP discovery regression fixes
- Fix runpm documentation
- Suspend/resume fixes
- Yellow Carp display fixes
- MCLK power management fix
- dma-buf fix

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmHOYw0ACgkQDHTzWXnE
hr5uSw/+LUOSTfobUSZxRLwhpD9wIk1i29J6OTxKO8DJHLGW1TlZzOI0QXFp1Ikf
oEImQkEr4YVzjmcbgtPSl2v2oI8odrIbvJnps733FereIkfCdiT4Odf+Is6/Gs5m
zjLg9EGIJt6TFrgCDuL9yFXWnVELpxmvsKJ+eyUa1NfbT61xSy7TcwRkv5+5gkoJ
ZMkuvVo2rgEAKiVA9vlSDjG0r8/ksFhK7hy9w0E5V44xJEmemEPRw9FjOd8Efujc
gbSCw5vIBXRPD7kDTwKUw6Y7MKChZ7DFyIF7t0ioez32cCK8MVrmjdD+cHcx77fv
EgvqlAbhZAFIo/nb/FGxVHYzlUbxqsZhYsYzX00WROEqgmiLiEirBXM1+6ChqS1C
Jicfe+Ko5MXle5MVd9UlgCIdd/St5Bfr77Nejq6U3R697Oskt/1g2nV1adCSTvyv
c3Tf9P3C9edzdzT6jnwLCkXCUtyki6w5RBgM4x9R1fP/BFvIOdahhcKilcqli2jx
s5HxMIZUYEcR5NNAcpMZFZNnDSGvI5pQWTqD7Gu1lsmyqWyy7GkBDbIjnToDPORn
3Bno2c1OhYanaxDr2pgGKgI1I9mRb0L+jPRRSNgBwgxMrmwixpMJlmCpGbI/AZtD
kZK9F8wAHUm/hrWMC7xrGFMHiEEdD4xV3jMz/mAgpGFE8WSZUgA=
=WRy6
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2021-12-31' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "This is a bit bigger than I'd like, however it has two weeks of amdgpu
  fixes in it, since they missed last week, which was very small.

  The nouveau regression is probably the biggest fix in here, and it
  needs to go into 5.15 as well. There are two i915 fixes, and then a
  scattering of amdgpu fixes. The biggest fix in there is for a fencing
  NULL pointer dereference, the rest are pretty minor.

  For the misc team, I've pulled the two misc fixes manually since I'm
  not sure what is happening at this time of year!

  The amdgpu maintainers have the outstanding runpm regression to fix
  still, they are just working through the last bits of it now.

  Summary:

  nouveau:
   - fencing regression fix

  i915:
   - Fix possible uninitialized variable
   - Fix composite fence seqno increment on each fence creation

  amdgpu:
   - Fencing fix
   - XGMI fix
   - VCN regression fix
   - IP discovery regression fixes
   - Fix runpm documentation
   - Suspend/resume fixes
   - Yellow Carp display fixes
   - MCLK power management fix
   - dma-buf fix"

* tag 'drm-fixes-2021-12-31' of git://anongit.freedesktop.org/drm/drm:
  drm/amd/display: Changed pipe split policy to allow for multi-display pipe split
  drm/amd/display: Fix USB4 null pointer dereference in update_psp_stream_config
  drm/amd/display: Set optimize_pwr_state for DCN31
  drm/amd/display: Send s0i2_rdy in stream_count == 0 optimization
  drm/amd/display: Added power down for DCN10
  drm/amd/display: fix B0 TMDS deepcolor no dislay issue
  drm/amdgpu: no DC support for headless chips
  drm/amdgpu: put SMU into proper state on runpm suspending for BOCO capable platform
  drm/amdgpu: always reset the asic in suspend (v2)
  drm/amd/pm: skip setting gfx cgpg in the s0ix suspend-resume
  drm/i915: Increment composite fence seqno
  drm/i915: Fix possible uninitialized variable in parallel extension
  drm/amdgpu: fix runpm documentation
  drm/nouveau: wait for the exclusive fence after the shared ones v2
  drm/amdgpu: add support for IP discovery gc_info table v2
  drm/amdgpu: When the VCN(1.0) block is suspended, powergating is explicitly enabled
  drm/amd/pm: Fix xgmi link control on aldebaran
  drm/amdgpu: introduce new amdgpu_fence object to indicate the job embedded fence
  drm/amdgpu: fix dropped backing store handling in amdgpu_dma_buf_move_notify
commit 4f3d93c6ea
@@ -3166,6 +3166,12 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 {
 	switch (asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+	case CHIP_HAINAN:
+#endif
+	case CHIP_TOPAZ:
+		/* chips with no display hardware */
+		return false;
 #if defined(CONFIG_DRM_AMD_DC)
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
@@ -4461,7 +4467,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 				 struct amdgpu_reset_context *reset_context)
 {
-	int i, j, r = 0;
+	int i, r = 0;
 	struct amdgpu_job *job = NULL;
 	bool need_full_reset =
 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
@@ -4483,15 +4489,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 
 		/*clear job fence from fence drv to avoid force_completion
 		 *leave NULL and vm flush fence in fence drv */
-		for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
-			struct dma_fence *old, **ptr;
+		amdgpu_fence_driver_clear_job_fences(ring);
 
-			ptr = &ring->fence_drv.fences[j];
-			old = rcu_dereference_protected(*ptr, 1);
-			if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
-				RCU_INIT_POINTER(*ptr, NULL);
-			}
-		}
 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
 		amdgpu_fence_driver_force_completion(ring);
 	}
@@ -526,10 +526,15 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 	}
 }
 
+union gc_info {
+	struct gc_info_v1_0 v1;
+	struct gc_info_v2_0 v2;
+};
+
 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
 {
 	struct binary_header *bhdr;
-	struct gc_info_v1_0 *gc_info;
+	union gc_info *gc_info;
 
 	if (!adev->mman.discovery_bin) {
 		DRM_ERROR("ip discovery uninitialized\n");
@@ -537,28 +542,55 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
 	}
 
 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
-	gc_info = (struct gc_info_v1_0 *)(adev->mman.discovery_bin +
+	gc_info = (union gc_info *)(adev->mman.discovery_bin +
 			le16_to_cpu(bhdr->table_list[GC].offset));
 
-	adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);
-	adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->gc_num_wgp0_per_sa) +
-					      le32_to_cpu(gc_info->gc_num_wgp1_per_sa));
-	adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->gc_num_sa_per_se);
-	adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->gc_num_rb_per_se);
-	adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->gc_num_gl2c);
-	adev->gfx.config.max_gprs = le32_to_cpu(gc_info->gc_num_gprs);
-	adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->gc_num_max_gs_thds);
-	adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->gc_gs_table_depth);
-	adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->gc_gsprim_buff_depth);
-	adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->gc_double_offchip_lds_buffer);
-	adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->gc_wave_size);
-	adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->gc_max_waves_per_simd);
-	adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->gc_max_scratch_slots_per_cu);
-	adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->gc_lds_size);
-	adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->gc_num_sc_per_se) /
-					 le32_to_cpu(gc_info->gc_num_sa_per_se);
-	adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->gc_num_packer_per_sc);
-
+	switch (gc_info->v1.header.version_major) {
+	case 1:
+		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
+		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
+						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
+		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
+		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
+		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
+		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
+		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
+		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
+		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
+		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
+		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
+		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
+		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
+		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
+						 le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
+		break;
+	case 2:
+		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
+		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
+		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
+		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
+		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
+		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
+		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
+		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
+		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
+		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
+		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
+		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
+		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
+		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
+						 le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
+		break;
+	default:
+		dev_err(adev->dev,
+			"Unhandled GC info table %d.%d\n",
+			gc_info->v1.header.version_major,
+			gc_info->v1.header.version_minor);
+		return -EINVAL;
+	}
 	return 0;
 }
 
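The hunk above switches amdgpu's GC-info parsing from a single hard-coded gc_info_v1_0 layout to a union that is dispatched on the table's version_major field. As a rough illustration of that pattern outside the kernel, here is a minimal, self-contained C sketch; the struct names (info_header, info_v1, info_v2) and the values are invented for the example and are not part of the patch.

/* Illustrative sketch only (not kernel code): dispatch on a versioned
 * blob by overlaying the possible layouts in a union, as the
 * amdgpu_discovery change above does for gc_info v1 vs v2.
 */
#include <stdint.h>
#include <stdio.h>

struct info_header { uint32_t version_major, version_minor; };
struct info_v1 { struct info_header header; uint32_t num_se; };
struct info_v2 { struct info_header header; uint32_t num_cu; };

union info {
	struct info_v1 v1;
	struct info_v2 v2;
};

static int parse_info(const void *blob)
{
	const union info *info = blob;

	/* Every layout starts with the same header, so the version can be
	 * read through any member before the rest is interpreted. */
	switch (info->v1.header.version_major) {
	case 1:
		printf("v1: num_se=%u\n", (unsigned)info->v1.num_se);
		return 0;
	case 2:
		printf("v2: num_cu=%u\n", (unsigned)info->v2.num_cu);
		return 0;
	default:
		fprintf(stderr, "unhandled table %u.%u\n",
			(unsigned)info->v1.header.version_major,
			(unsigned)info->v1.header.version_minor);
		return -1;
	}
}

int main(void)
{
	struct info_v2 sample = { .header = { 2, 0 }, .num_cu = 8 };

	return parse_info(&sample);
}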
@@ -384,7 +384,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
 	struct amdgpu_vm_bo_base *bo_base;
 	int r;
 
-	if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
+	if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
 		return;
 
 	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
@@ -328,10 +328,11 @@ module_param_named(aspm, amdgpu_aspm, int, 0444);
 
 /**
  * DOC: runpm (int)
- * Override for runtime power management control for dGPUs in PX/HG laptops. The amdgpu driver can dynamically power down
- * the dGPU on PX/HG laptops when it is idle. The default is -1 (auto enable). Setting the value to 0 disables this functionality.
+ * Override for runtime power management control for dGPUs. The amdgpu driver can dynamically power down
+ * the dGPUs when they are idle if supported. The default is -1 (auto enable).
+ * Setting the value to 0 disables this functionality.
  */
-MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = PX only default)");
+MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto)");
 module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
 
 /**
@@ -2153,7 +2154,10 @@ static int amdgpu_pmops_suspend(struct device *dev)
 		adev->in_s3 = true;
 	r = amdgpu_device_suspend(drm_dev, true);
 	adev->in_s3 = false;
-
+	if (r)
+		return r;
+	if (!adev->in_s0ix)
+		r = amdgpu_asic_reset(adev);
 	return r;
 }
 
@@ -2234,12 +2238,27 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
 	if (amdgpu_device_supports_px(drm_dev))
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
+	/*
+	 * By setting mp1_state as PP_MP1_STATE_UNLOAD, MP1 will do some
+	 * proper cleanups and put itself into a state ready for PNP. That
+	 * can address some random resuming failure observed on BOCO capable
+	 * platforms.
+	 * TODO: this may be also needed for PX capable platform.
+	 */
+	if (amdgpu_device_supports_boco(drm_dev))
+		adev->mp1_state = PP_MP1_STATE_UNLOAD;
+
 	ret = amdgpu_device_suspend(drm_dev, false);
 	if (ret) {
 		adev->in_runpm = false;
+		if (amdgpu_device_supports_boco(drm_dev))
+			adev->mp1_state = PP_MP1_STATE_NONE;
 		return ret;
 	}
 
+	if (amdgpu_device_supports_boco(drm_dev))
+		adev->mp1_state = PP_MP1_STATE_NONE;
+
 	if (amdgpu_device_supports_px(drm_dev)) {
 		/* Only need to handle PCI state in the driver for ATPX
 		 * PCI core handles it for _PR3.
@@ -77,11 +77,13 @@ void amdgpu_fence_slab_fini(void)
  * Cast helper
  */
 static const struct dma_fence_ops amdgpu_fence_ops;
+static const struct dma_fence_ops amdgpu_job_fence_ops;
 static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
 {
 	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
 
-	if (__f->base.ops == &amdgpu_fence_ops)
+	if (__f->base.ops == &amdgpu_fence_ops ||
+	    __f->base.ops == &amdgpu_job_fence_ops)
 		return __f;
 
 	return NULL;
@@ -158,19 +160,18 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
 	}
 
 	seq = ++ring->fence_drv.sync_seq;
-	if (job != NULL && job->job_run_counter) {
+	if (job && job->job_run_counter) {
 		/* reinit seq for resubmitted jobs */
 		fence->seqno = seq;
 	} else {
-		dma_fence_init(fence, &amdgpu_fence_ops,
-			       &ring->fence_drv.lock,
-			       adev->fence_context + ring->idx,
-			       seq);
-	}
-
-	if (job != NULL) {
-		/* mark this fence has a parent job */
-		set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
+		if (job)
+			dma_fence_init(fence, &amdgpu_job_fence_ops,
+				       &ring->fence_drv.lock,
+				       adev->fence_context + ring->idx, seq);
+		else
+			dma_fence_init(fence, &amdgpu_fence_ops,
+				       &ring->fence_drv.lock,
+				       adev->fence_context + ring->idx, seq);
 	}
 
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
@@ -620,6 +621,25 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
 	}
 }
 
+/**
+ * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
+ *
+ * @ring: fence of the ring to be cleared
+ *
+ */
+void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
+{
+	int i;
+	struct dma_fence *old, **ptr;
+
+	for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
+		ptr = &ring->fence_drv.fences[i];
+		old = rcu_dereference_protected(*ptr, 1);
+		if (old && old->ops == &amdgpu_job_fence_ops)
+			RCU_INIT_POINTER(*ptr, NULL);
+	}
+}
+
 /**
  * amdgpu_fence_driver_force_completion - force signal latest fence of ring
  *
@@ -643,16 +663,14 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
 
 static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
 {
-	struct amdgpu_ring *ring;
+	return (const char *)to_amdgpu_fence(f)->ring->name;
+}
 
-	if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
-		struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
+{
+	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
 
-		ring = to_amdgpu_ring(job->base.sched);
-	} else {
-		ring = to_amdgpu_fence(f)->ring;
-	}
-	return (const char *)ring->name;
+	return (const char *)to_amdgpu_ring(job->base.sched)->name;
 }
 
 /**
@@ -665,18 +683,25 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
  */
 static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 {
-	struct amdgpu_ring *ring;
+	if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
+		amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);
 
-	if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
-		struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+	return true;
+}
 
-		ring = to_amdgpu_ring(job->base.sched);
-	} else {
-		ring = to_amdgpu_fence(f)->ring;
-	}
+/**
+ * amdgpu_job_fence_enable_signaling - enable signalling on job fence
+ * @f: fence
+ *
+ * This is the simliar function with amdgpu_fence_enable_signaling above, it
+ * only handles the job embedded fence.
+ */
+static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
+{
+	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
 
-	if (!timer_pending(&ring->fence_drv.fallback_timer))
-		amdgpu_fence_schedule_fallback(ring);
+	if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
+		amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
 
 	return true;
 }
@@ -692,19 +717,23 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
 {
 	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 
-	if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
-	/* free job if fence has a parent job */
-		struct amdgpu_job *job;
-
-		job = container_of(f, struct amdgpu_job, hw_fence);
-		kfree(job);
-	} else {
 	/* free fence_slab if it's separated fence*/
-		struct amdgpu_fence *fence;
+	kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
+}
 
-		fence = to_amdgpu_fence(f);
-		kmem_cache_free(amdgpu_fence_slab, fence);
-	}
+/**
+ * amdgpu_job_fence_free - free up the job with embedded fence
+ *
+ * @rcu: RCU callback head
+ *
+ * Free up the job with embedded fence after the RCU grace period.
+ */
+static void amdgpu_job_fence_free(struct rcu_head *rcu)
+{
+	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+
+	/* free job if fence has a parent job */
+	kfree(container_of(f, struct amdgpu_job, hw_fence));
 }
 
 /**
@@ -720,6 +749,19 @@ static void amdgpu_fence_release(struct dma_fence *f)
 	call_rcu(&f->rcu, amdgpu_fence_free);
 }
 
+/**
+ * amdgpu_job_fence_release - callback that job embedded fence can be freed
+ *
+ * @f: fence
+ *
+ * This is the simliar function with amdgpu_fence_release above, it
+ * only handles the job embedded fence.
+ */
+static void amdgpu_job_fence_release(struct dma_fence *f)
+{
+	call_rcu(&f->rcu, amdgpu_job_fence_free);
+}
+
 static const struct dma_fence_ops amdgpu_fence_ops = {
 	.get_driver_name = amdgpu_fence_get_driver_name,
 	.get_timeline_name = amdgpu_fence_get_timeline_name,
@@ -727,6 +769,12 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
 	.release = amdgpu_fence_release,
 };
 
+static const struct dma_fence_ops amdgpu_job_fence_ops = {
+	.get_driver_name = amdgpu_fence_get_driver_name,
+	.get_timeline_name = amdgpu_job_fence_get_timeline_name,
+	.enable_signaling = amdgpu_job_fence_enable_signaling,
+	.release = amdgpu_job_fence_release,
+};
+
 /*
  * Fence debugfs
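The amdgpu_fence.c changes above stop tagging job-embedded fences with a flag bit and instead give them their own dma_fence_ops table, so each callback already knows which kind of fence it was handed. The following stand-alone C sketch shows the underlying idea with invented types (fence, fence_ops, plain_ops, job_ops); it is an illustration of the pattern, not kernel code.

/* Illustrative sketch only: the ops-table pointer itself identifies the
 * variant, so no extra flag bit is needed to tell the two kinds apart.
 */
#include <stdio.h>
#include <stdlib.h>

struct fence;
struct fence_ops { void (*release)(struct fence *f); };
struct fence { const struct fence_ops *ops; };

static void plain_release(struct fence *f)
{
	printf("releasing standalone fence\n");
	free(f);
}

static void job_release(struct fence *f)
{
	printf("releasing job-embedded fence\n");
	free(f);
}

static const struct fence_ops plain_ops = { .release = plain_release };
static const struct fence_ops job_ops = { .release = job_release };

static int is_job_fence(const struct fence *f)
{
	return f->ops == &job_ops;
}

int main(void)
{
	struct fence *f = malloc(sizeof(*f));

	if (!f)
		return 1;
	f->ops = &job_ops;
	printf("job fence? %d\n", is_job_fence(f));
	f->ops->release(f);	/* dispatches to the right release path */
	return 0;
}

One benefit of this design, which the patch's clear_job_fences helper relies on, is that a walker over a fence array can filter "old->ops == &amdgpu_job_fence_ops" without consulting any per-fence flags.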
@@ -53,9 +53,6 @@ enum amdgpu_ring_priority_level {
 #define AMDGPU_FENCE_FLAG_INT (1 << 1)
 #define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
 
-/* fence flag bit to indicate the face is embedded in job*/
-#define AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT (DMA_FENCE_FLAG_USER_BITS + 1)
-
 #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
 
 #define AMDGPU_IB_POOL_SIZE (1024 * 1024)
@@ -114,6 +111,7 @@ struct amdgpu_fence_driver {
 	struct dma_fence **fences;
 };
 
+void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
 
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
@@ -246,6 +246,13 @@ static int vcn_v1_0_suspend(void *handle)
 {
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool idle_work_unexecuted;
 
+	idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
+	if (idle_work_unexecuted) {
+		if (adev->pm.dpm_enabled)
+			amdgpu_dpm_enable_uvd(adev, false);
+	}
+
 	r = vcn_v1_0_hw_fini(adev);
 	if (r)
@@ -158,6 +158,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
 		union display_idle_optimization_u idle_info = { 0 };
 		idle_info.idle_info.df_request_disabled = 1;
 		idle_info.idle_info.phy_ref_clk_off = 1;
+		idle_info.idle_info.s0i2_rdy = 1;
 		dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
 		/* update power state */
 		clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
@@ -3945,12 +3945,9 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
 		config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 		config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
 
-		if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY ||
-				pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
-			link_enc = pipe_ctx->stream->link->link_enc;
-			config.dio_output_type = pipe_ctx->stream->link->ep_type;
-			config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
-			if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
-				link_enc = pipe_ctx->stream->link->link_enc;
-			else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+		if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
+			link_enc = pipe_ctx->stream->link->link_enc;
+		else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
@@ -78,6 +78,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
 	.get_clock = dcn10_get_clock,
 	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
 	.calc_vupdate_position = dcn10_calc_vupdate_position,
+	.power_down = dce110_power_down,
 	.set_backlight_level = dce110_set_backlight_level,
 	.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
 	.set_pipe = dce110_set_pipe,
@@ -1069,7 +1069,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = true,
-	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,
@@ -603,7 +603,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = true,
-	.pipe_split_policy = MPC_SPLIT_AVOID,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,
@@ -874,7 +874,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.clock_trace = true,
 	.disable_pplib_clock_request = true,
 	.min_disp_clk_khz = 100000,
-	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,
@@ -840,7 +840,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = true,
-	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,
@@ -686,7 +686,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.disable_clock_gate = true,
 	.disable_pplib_clock_request = true,
 	.disable_pplib_wm_range = true,
-	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,
@@ -211,7 +211,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = true,
-	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,
@@ -193,7 +193,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = true,
-	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,
@@ -101,6 +101,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
 	.z10_restore = dcn31_z10_restore,
 	.z10_save_init = dcn31_z10_save_init,
 	.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+	.optimize_pwr_state = dcn21_optimize_pwr_state,
 	.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
 	.update_visual_confirm_color = dcn20_update_visual_confirm_color,
 };
@@ -355,6 +355,14 @@ static const struct dce110_clk_src_regs clk_src_regs[] = {
 	clk_src_regs(3, D),
 	clk_src_regs(4, E)
 };
+/*pll_id being rempped in dmub, in driver it is logical instance*/
+static const struct dce110_clk_src_regs clk_src_regs_b0[] = {
+	clk_src_regs(0, A),
+	clk_src_regs(1, B),
+	clk_src_regs(2, F),
+	clk_src_regs(3, G),
+	clk_src_regs(4, E)
+};
 
 static const struct dce110_clk_src_shift cs_shift = {
 	CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
@@ -994,7 +1002,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = false,
-	.pipe_split_policy = MPC_SPLIT_AVOID,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,
@@ -2276,14 +2284,27 @@ static bool dcn31_resource_construct(
 			dcn30_clock_source_create(ctx, ctx->dc_bios,
 				CLOCK_SOURCE_COMBO_PHY_PLL1,
 				&clk_src_regs[1], false);
-	pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
+	/*move phypllx_pixclk_resync to dmub next*/
+	if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+		pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
+			dcn30_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_COMBO_PHY_PLL2,
+				&clk_src_regs_b0[2], false);
+		pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
+			dcn30_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_COMBO_PHY_PLL3,
+				&clk_src_regs_b0[3], false);
+	} else {
+		pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
 			dcn30_clock_source_create(ctx, ctx->dc_bios,
 				CLOCK_SOURCE_COMBO_PHY_PLL2,
 				&clk_src_regs[2], false);
-	pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
+		pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
 			dcn30_clock_source_create(ctx, ctx->dc_bios,
 				CLOCK_SOURCE_COMBO_PHY_PLL3,
 				&clk_src_regs[3], false);
+	}
 
 	pool->base.clock_sources[DCN31_CLK_SRC_PLL4] =
 			dcn30_clock_source_create(ctx, ctx->dc_bios,
 				CLOCK_SOURCE_COMBO_PHY_PLL4,
@@ -49,4 +49,35 @@ struct resource_pool *dcn31_create_resource_pool(
 		const struct dc_init_data *init_data,
 		struct dc *dc);
 
+/*temp: B0 specific before switch to dcn313 headers*/
+#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
+#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e
+#define regPHYPLLF_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regPHYPLLG_PIXCLK_RESYNC_CNTL 0x005f
+#define regPHYPLLG_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+
+//PHYPLLF_PIXCLK_RESYNC_CNTL
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+
+//PHYPLLG_PIXCLK_RESYNC_CNTL
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+#endif
 #endif /* _DCN31_RESOURCE_H_ */
@@ -143,6 +143,55 @@ struct gc_info_v1_0 {
 	uint32_t gc_num_gl2a;
 };
 
+struct gc_info_v1_1 {
+	struct gpu_info_header header;
+
+	uint32_t gc_num_se;
+	uint32_t gc_num_wgp0_per_sa;
+	uint32_t gc_num_wgp1_per_sa;
+	uint32_t gc_num_rb_per_se;
+	uint32_t gc_num_gl2c;
+	uint32_t gc_num_gprs;
+	uint32_t gc_num_max_gs_thds;
+	uint32_t gc_gs_table_depth;
+	uint32_t gc_gsprim_buff_depth;
+	uint32_t gc_parameter_cache_depth;
+	uint32_t gc_double_offchip_lds_buffer;
+	uint32_t gc_wave_size;
+	uint32_t gc_max_waves_per_simd;
+	uint32_t gc_max_scratch_slots_per_cu;
+	uint32_t gc_lds_size;
+	uint32_t gc_num_sc_per_se;
+	uint32_t gc_num_sa_per_se;
+	uint32_t gc_num_packer_per_sc;
+	uint32_t gc_num_gl2a;
+	uint32_t gc_num_tcp_per_sa;
+	uint32_t gc_num_sdp_interface;
+	uint32_t gc_num_tcps;
+};
+
+struct gc_info_v2_0 {
+	struct gpu_info_header header;
+
+	uint32_t gc_num_se;
+	uint32_t gc_num_cu_per_sh;
+	uint32_t gc_num_sh_per_se;
+	uint32_t gc_num_rb_per_se;
+	uint32_t gc_num_tccs;
+	uint32_t gc_num_gprs;
+	uint32_t gc_num_max_gs_thds;
+	uint32_t gc_gs_table_depth;
+	uint32_t gc_gsprim_buff_depth;
+	uint32_t gc_parameter_cache_depth;
+	uint32_t gc_double_offchip_lds_buffer;
+	uint32_t gc_wave_size;
+	uint32_t gc_max_waves_per_simd;
+	uint32_t gc_max_scratch_slots_per_cu;
+	uint32_t gc_lds_size;
+	uint32_t gc_num_sc_per_se;
+	uint32_t gc_num_packer_per_sc;
+};
+
 typedef struct harvest_info_header {
 	uint32_t signature; /* Table Signature */
 	uint32_t version;   /* Table Version */
@@ -1568,9 +1568,7 @@ static int smu_suspend(void *handle)
 
 	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
 
-	/* skip CGPG when in S0ix */
-	if (smu->is_apu && !adev->in_s0ix)
-		smu_set_gfx_cgpg(&adev->smu, false);
+	smu_set_gfx_cgpg(&adev->smu, false);
 
 	return 0;
 }
@@ -1601,8 +1599,7 @@ static int smu_resume(void *handle)
 		return ret;
 	}
 
-	if (smu->is_apu)
-		smu_set_gfx_cgpg(&adev->smu, true);
+	smu_set_gfx_cgpg(&adev->smu, true);
 
 	smu->disable_uclk_switch = 0;
 
@@ -120,7 +120,8 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
 
 int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
 {
-	if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
+	/* Until now the SMU12 only implemented for Renoir series so here neen't do APU check. */
+	if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) || smu->adev->in_s0ix)
 		return 0;
 
 	return smu_cmn_send_smc_msg_with_param(smu,
@@ -1621,7 +1621,7 @@ static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
 {
 	return smu_cmn_send_smc_msg_with_param(smu,
 					       SMU_MSG_GmiPwrDnControl,
-					       en ? 1 : 0,
+					       en ? 0 : 1,
 					       NULL);
 }
 
@@ -564,6 +564,7 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
 		container_of_user(base, typeof(*ext), base);
 	const struct set_proto_ctx_engines *set = data;
 	struct drm_i915_private *i915 = set->i915;
+	struct i915_engine_class_instance prev_engine;
 	u64 flags;
 	int err = 0, n, i, j;
 	u16 slot, width, num_siblings;
@@ -629,7 +630,6 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
 	/* Create contexts / engines */
 	for (i = 0; i < width; ++i) {
 		intel_engine_mask_t current_mask = 0;
-		struct i915_engine_class_instance prev_engine;
 
 		for (j = 0; j < num_siblings; ++j) {
 			struct i915_engine_class_instance ci;
@@ -3017,7 +3017,7 @@ eb_composite_fence_create(struct i915_execbuffer *eb, int out_fence_fd)
 	fence_array = dma_fence_array_create(eb->num_batches,
 					     fences,
 					     eb->context->parallel.fence_context,
-					     eb->context->parallel.seqno,
+					     eb->context->parallel.seqno++,
 					     false);
 	if (!fence_array) {
 		kfree(fences);
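The one-character change above (seqno becomes seqno++) makes every composite fence created for a parallel submit consume a fresh sequence number instead of reusing the same one. Here is a minimal sketch of that idea with made-up timeline/fence structs rather than the i915 ones; it only illustrates why the post-increment matters.

/* Illustrative sketch only: each fence created on a timeline takes the
 * current sequence number, and the timeline then advances, so later
 * fences compare as "newer".
 */
#include <stdint.h>
#include <stdio.h>

struct timeline { uint64_t fence_context; uint64_t seqno; };
struct example_fence { uint64_t context; uint64_t seqno; };

static struct example_fence fence_create(struct timeline *tl)
{
	/* Post-increment: this fence gets the current number and the next
	 * creation on the same timeline gets a strictly larger one. */
	struct example_fence f = {
		.context = tl->fence_context,
		.seqno = tl->seqno++,
	};

	return f;
}

int main(void)
{
	struct timeline tl = { .fence_context = 1, .seqno = 1 };
	struct example_fence a = fence_create(&tl);
	struct example_fence b = fence_create(&tl);

	printf("a.seqno=%llu b.seqno=%llu\n",
	       (unsigned long long)a.seqno,
	       (unsigned long long)b.seqno);
	return 0;
}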
@@ -353,34 +353,16 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 
 		if (ret)
 			return ret;
-
-		fobj = NULL;
-	} else {
-		fobj = dma_resv_shared_list(resv);
 	}
 
+	fobj = dma_resv_shared_list(resv);
 	fence = dma_resv_excl_fence(resv);
 
-	if (fence) {
-		struct nouveau_channel *prev = NULL;
-		bool must_wait = true;
-
-		f = nouveau_local_fence(fence, chan->drm);
-		if (f) {
-			rcu_read_lock();
-			prev = rcu_dereference(f->channel);
-			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
-				must_wait = false;
-			rcu_read_unlock();
-		}
-
-		if (must_wait)
-			ret = dma_fence_wait(fence, intr);
-
-		return ret;
-	}
-
-	if (!exclusive || !fobj)
-		return ret;
-
-	for (i = 0; i < fobj->shared_count && !ret; ++i) {
+	/* Waiting for the exclusive fence first causes performance regressions
+	 * under some circumstances. So manually wait for the shared ones first.
+	 */
+	for (i = 0; i < (fobj ? fobj->shared_count : 0) && !ret; ++i) {
 		struct nouveau_channel *prev = NULL;
 		bool must_wait = true;
@@ -400,6 +382,26 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 			ret = dma_fence_wait(fence, intr);
 	}
 
+	fence = dma_resv_excl_fence(resv);
+	if (fence) {
+		struct nouveau_channel *prev = NULL;
+		bool must_wait = true;
+
+		f = nouveau_local_fence(fence, chan->drm);
+		if (f) {
+			rcu_read_lock();
+			prev = rcu_dereference(f->channel);
+			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+				must_wait = false;
+			rcu_read_unlock();
+		}
+
+		if (must_wait)
+			ret = dma_fence_wait(fence, intr);
+
+		return ret;
+	}
+
 	return ret;
 }
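The two nouveau hunks above reorder the synchronization so the shared (read) fences are waited on before the exclusive (write) fence, which the added in-code comment credits with avoiding a performance regression. Below is a small, self-contained sketch of that ordering using invented types (simple_fence, sync_object); it mirrors only the control flow, not the real dma_resv API.

/* Illustrative sketch only: wait on all shared fences first, stop on the
 * first error, then wait on the exclusive fence last.
 */
#include <stdio.h>

struct simple_fence {
	const char *name;
	int signaled;
};

static int fence_wait(struct simple_fence *f)
{
	/* Stand-in for dma_fence_wait(): pretend the wait always succeeds. */
	printf("waiting on %s\n", f->name);
	f->signaled = 1;
	return 0;
}

static int sync_object(struct simple_fence *shared, int shared_count,
		       struct simple_fence *excl)
{
	int ret = 0, i;

	/* Shared (read) fences first; an earlier error short-circuits. */
	for (i = 0; i < shared_count && !ret; ++i)
		ret = fence_wait(&shared[i]);

	/* Exclusive (write) fence last, mirroring the patched ordering. */
	if (!ret && excl)
		ret = fence_wait(excl);

	return ret;
}

int main(void)
{
	struct simple_fence shared[2] = { { "shared-0", 0 }, { "shared-1", 0 } };
	struct simple_fence excl = { "exclusive", 0 };

	return sync_object(shared, 2, &excl);
}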