drm fixes for 5.12-rc1 + msm-next

Merge tag 'drm-next-2021-02-26' of git://anongit.freedesktop.org/drm/drm

Pull more drm updates from Dave Airlie:
 "This is mostly fixes, but I missed the msm-next pull last week; it has
  been in drm-next since. Otherwise it's a selection of i915, amdgpu and
  misc fixes plus one TTM memory leak fix; nothing really major stands
  out.

  core:
   - vblank fence timing improvements

  dma-buf:
   - improve error handling

  ttm:
   - memory leak fix

  msm:
   - a6xx speedbin support
   - a508, a509, a512 support
   - various a5xx fixes
   - various dpu fixes
   - qseed3lite support for sm8250
   - dsi fix for msm8994
   - mdp5 fix for framerate bug with cmd mode panels
   - a6xx GMU OOB race fixes that were showing up in CI
   - various addition and removal of semicolons
   - gem submit fix for legacy userspace relocs path

  amdgpu:
   - clang warning fix
   - S0ix platform shutdown/poweroff fix
   - misc display fixes

  i915:
   - color format fix
   - -Wuninitialized re-enabled
   - GVT ww locking and cmd parser fixes

  atyfb:
   - fix build

  rockchip:
   - AFBC modifier fix"

* tag 'drm-next-2021-02-26' of git://anongit.freedesktop.org/drm/drm: (60 commits)
  drm/panel: kd35t133: allow using non-continuous dsi clock
  drm/rockchip: Require the YTR modifier for AFBC
  drm/ttm: Fix a memory leak
  drm/drm_vblank: set the dma-fence timestamp during send_vblank_event
  dma-fence: allow signaling drivers to set fence timestamp
  dma-buf: heaps: Rework heap allocation hooks to return struct dma_buf instead of fd
  dma-buf: system_heap: Make sure to return an error if we abort
  drm/amd/display: Fix system hang after multiple hotplugs (v3)
  drm/amdgpu: fix shutdown and poweroff process failed with s0ix
  drm/i915: Nuke INTEL_OUTPUT_FORMAT_INVALID
  drm/i915: Enable -Wuninitialized
  drm/amd/display: Remove Assert from dcn10_get_dig_frontend
  drm/amd/display: Add vupdate_no_lock interrupts for DCN2.1
  Revert "drm/amd/display: reuse current context instead of recreating one"
  drm/amd/pm/swsmu: Avoid using structure_size uninitialized in smu_cmn_init_soft_gpu_metrics
  fbdev: atyfb: add stubs for aty_{ld,st}_lcd()
  drm/i915/gvt: Introduce per object locking in GVT scheduler.
  drm/i915/gvt: Purge dev_priv->gt
  drm/i915/gvt: Parse default state to update reg whitelist
  dt-bindings: dp-connector: Drop maxItems from -supply
  ...
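
The headline core change is the vblank fence timing work: dma_fence_signal_timestamp() and dma_fence_signal_timestamp_locked() let a driver stamp a fence with the hardware event time instead of the moment the signal call runs, and drm_send_event_timestamp_locked() applies the same time to the userspace event. As a rough, illustrative sketch only (the foo_* driver structure and fields are invented and not part of this pull; only the dma-fence calls are the new API), a vblank interrupt handler might use it like this:

#include <linux/dma-fence.h>
#include <linux/ktime.h>
#include <linux/spinlock.h>

/* Hypothetical driver state for the example. */
struct foo_crtc {
	spinlock_t lock;
	struct dma_fence *out_fence;	/* fence for the flip that just completed */
};

static void foo_crtc_vblank_irq(struct foo_crtc *crtc)
{
	ktime_t now = ktime_get();	/* one CLOCK_MONOTONIC stamp for event and fence */
	unsigned long flags;

	spin_lock_irqsave(&crtc->lock, flags);
	if (crtc->out_fence) {
		/* New in this pull: signal with an explicit timestamp. */
		dma_fence_signal_timestamp(crtc->out_fence, now);
		dma_fence_put(crtc->out_fence);
		crtc->out_fence = NULL;
	}
	spin_unlock_irqrestore(&crtc->lock, flags);
}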
@@ -26,7 +26,6 @@ properties:

  dp-pwr-supply:
    description: Power supply for the DP_PWR pin
    maxItems: 1

  port:
    $ref: /schemas/graph.yaml#/properties/port

@@ -311,6 +311,83 @@ void __dma_fence_might_wait(void)
#endif


/**
 * dma_fence_signal_timestamp_locked - signal completion of a fence
 * @fence: the fence to signal
 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time. Set the timestamp provided as the fence
 * signal timestamp.
 *
 * Unlike dma_fence_signal_timestamp(), this function must be called with
 * &dma_fence.lock held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
				      ktime_t timestamp)
{
	struct dma_fence_cb *cur, *tmp;
	struct list_head cb_list;

	lockdep_assert_held(fence->lock);

	if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				      &fence->flags)))
		return -EINVAL;

	/* Stash the cb_list before replacing it with the timestamp */
	list_replace(&fence->cb_list, &cb_list);

	fence->timestamp = timestamp;
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
	trace_dma_fence_signaled(fence);

	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
		INIT_LIST_HEAD(&cur->node);
		cur->func(fence, cur);
	}

	return 0;
}
EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);

/**
 * dma_fence_signal_timestamp - signal completion of a fence
 * @fence: the fence to signal
 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time. Set the timestamp provided as the fence
 * signal timestamp.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
{
	unsigned long flags;
	int ret;

	if (!fence)
		return -EINVAL;

	spin_lock_irqsave(fence->lock, flags);
	ret = dma_fence_signal_timestamp_locked(fence, timestamp);
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_signal_timestamp);

/**
 * dma_fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
@@ -329,28 +406,7 @@ void __dma_fence_might_wait(void)
 */
int dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *cur, *tmp;
	struct list_head cb_list;

	lockdep_assert_held(fence->lock);

	if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				      &fence->flags)))
		return -EINVAL;

	/* Stash the cb_list before replacing it with the timestamp */
	list_replace(&fence->cb_list, &cb_list);

	fence->timestamp = ktime_get();
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
	trace_dma_fence_signaled(fence);

	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
		INIT_LIST_HEAD(&cur->node);
		cur->func(fence, cur);
	}

	return 0;
	return dma_fence_signal_timestamp_locked(fence, ktime_get());
}
EXPORT_SYMBOL(dma_fence_signal_locked);

@@ -379,7 +435,7 @@ int dma_fence_signal(struct dma_fence *fence)
	tmp = dma_fence_begin_signalling();

	spin_lock_irqsave(fence->lock, flags);
	ret = dma_fence_signal_locked(fence);
	ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
	spin_unlock_irqrestore(fence->lock, flags);

	dma_fence_end_signalling(tmp);

@@ -52,6 +52,9 @@ static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
				 unsigned int fd_flags,
				 unsigned int heap_flags)
{
	struct dma_buf *dmabuf;
	int fd;

	/*
	 * Allocations from all heaps have to begin
	 * and end on page boundaries.
@@ -60,7 +63,16 @@ static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
	if (!len)
		return -EINVAL;

	return heap->ops->allocate(heap, len, fd_flags, heap_flags);
	dmabuf = heap->ops->allocate(heap, len, fd_flags, heap_flags);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, fd_flags);
	if (fd < 0) {
		dma_buf_put(dmabuf);
		/* just return, as put will call release and that will free */
	}
	return fd;
}

static int dma_heap_open(struct inode *inode, struct file *file)

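With this rework the fd handling is centralised in dma_heap_buffer_alloc() above, so an exporting heap's ->allocate() hook only has to produce a dma_buf (or an ERR_PTR). A minimal sketch of the new hook shape, with invented my_heap_* names and the actual buffer/page setup elided; the real conversions are the cma_heap and system_heap hunks that follow:

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/slab.h>

struct my_heap_buffer {		/* placeholder private state for the sketch */
	size_t len;
};

static struct dma_buf *my_heap_allocate(struct dma_heap *heap,
					unsigned long len,
					unsigned long fd_flags,
					unsigned long heap_flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct my_heap_buffer *buffer;
	struct dma_buf *dmabuf;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);	/* errors are now ERR_PTRs, not negative ints */

	/* ... allocate backing pages, fill buffer and exp_info (ops, size, priv) ... */

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		kfree(buffer);
		return dmabuf;			/* propagate the ERR_PTR unchanged */
	}

	return dmabuf;				/* no dma_buf_fd() in the heap any more */
}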
@ -271,10 +271,10 @@ static const struct dma_buf_ops cma_heap_buf_ops = {
|
||||
.release = cma_heap_dma_buf_release,
|
||||
};
|
||||
|
||||
static int cma_heap_allocate(struct dma_heap *heap,
|
||||
unsigned long len,
|
||||
unsigned long fd_flags,
|
||||
unsigned long heap_flags)
|
||||
static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
|
||||
unsigned long len,
|
||||
unsigned long fd_flags,
|
||||
unsigned long heap_flags)
|
||||
{
|
||||
struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
|
||||
struct cma_heap_buffer *buffer;
|
||||
@ -289,7 +289,7 @@ static int cma_heap_allocate(struct dma_heap *heap,
|
||||
|
||||
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
|
||||
if (!buffer)
|
||||
return -ENOMEM;
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
INIT_LIST_HEAD(&buffer->attachments);
|
||||
mutex_init(&buffer->lock);
|
||||
@ -348,15 +348,7 @@ static int cma_heap_allocate(struct dma_heap *heap,
|
||||
ret = PTR_ERR(dmabuf);
|
||||
goto free_pages;
|
||||
}
|
||||
|
||||
ret = dma_buf_fd(dmabuf, fd_flags);
|
||||
if (ret < 0) {
|
||||
dma_buf_put(dmabuf);
|
||||
/* just return, as put will call release and that will free */
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
return dmabuf;
|
||||
|
||||
free_pages:
|
||||
kfree(buffer->pages);
|
||||
@ -365,7 +357,7 @@ free_cma:
|
||||
free_buffer:
|
||||
kfree(buffer);
|
||||
|
||||
return ret;
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
static const struct dma_heap_ops cma_heap_ops = {
|
||||
|
@ -331,10 +331,10 @@ static struct page *alloc_largest_available(unsigned long size,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int system_heap_allocate(struct dma_heap *heap,
|
||||
unsigned long len,
|
||||
unsigned long fd_flags,
|
||||
unsigned long heap_flags)
|
||||
static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
|
||||
unsigned long len,
|
||||
unsigned long fd_flags,
|
||||
unsigned long heap_flags)
|
||||
{
|
||||
struct system_heap_buffer *buffer;
|
||||
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
|
||||
@ -349,7 +349,7 @@ static int system_heap_allocate(struct dma_heap *heap,
|
||||
|
||||
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
|
||||
if (!buffer)
|
||||
return -ENOMEM;
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
INIT_LIST_HEAD(&buffer->attachments);
|
||||
mutex_init(&buffer->lock);
|
||||
@ -363,8 +363,10 @@ static int system_heap_allocate(struct dma_heap *heap,
|
||||
* Avoid trying to allocate memory if the process
|
||||
* has been killed by SIGKILL
|
||||
*/
|
||||
if (fatal_signal_pending(current))
|
||||
if (fatal_signal_pending(current)) {
|
||||
ret = -EINTR;
|
||||
goto free_buffer;
|
||||
}
|
||||
|
||||
page = alloc_largest_available(size_remaining, max_order);
|
||||
if (!page)
|
||||
@ -397,14 +399,7 @@ static int system_heap_allocate(struct dma_heap *heap,
|
||||
ret = PTR_ERR(dmabuf);
|
||||
goto free_pages;
|
||||
}
|
||||
|
||||
ret = dma_buf_fd(dmabuf, fd_flags);
|
||||
if (ret < 0) {
|
||||
dma_buf_put(dmabuf);
|
||||
/* just return, as put will call release and that will free */
|
||||
return ret;
|
||||
}
|
||||
return ret;
|
||||
return dmabuf;
|
||||
|
||||
free_pages:
|
||||
for_each_sgtable_sg(table, sg, i) {
|
||||
@ -418,7 +413,7 @@ free_buffer:
|
||||
__free_pages(page, compound_order(page));
|
||||
kfree(buffer);
|
||||
|
||||
return ret;
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
static const struct dma_heap_ops system_heap_ops = {
|
||||
|
@ -1008,6 +1008,12 @@ struct amdgpu_device {
|
||||
bool in_suspend;
|
||||
bool in_hibernate;
|
||||
|
||||
/*
|
||||
* The combination flag in_poweroff_reboot_com used to identify the poweroff
|
||||
* and reboot opt in the s0i3 system-wide suspend.
|
||||
*/
|
||||
bool in_poweroff_reboot_com;
|
||||
|
||||
atomic_t in_gpu_reset;
|
||||
enum pp_mp1_state mp1_state;
|
||||
struct rw_semaphore reset_sem;
|
||||
|
@ -2678,7 +2678,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
|
||||
{
|
||||
int i, r;
|
||||
|
||||
if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
|
||||
if (adev->in_poweroff_reboot_com ||
|
||||
!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
|
||||
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
|
||||
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
|
||||
}
|
||||
@ -3741,7 +3742,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
|
||||
|
||||
amdgpu_fence_driver_suspend(adev);
|
||||
|
||||
if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
|
||||
if (adev->in_poweroff_reboot_com ||
|
||||
!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
|
||||
r = amdgpu_device_ip_suspend_phase2(adev);
|
||||
else
|
||||
amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
|
||||
|
@ -1270,7 +1270,9 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
|
||||
*/
|
||||
if (!amdgpu_passthrough(adev))
|
||||
adev->mp1_state = PP_MP1_STATE_UNLOAD;
|
||||
adev->in_poweroff_reboot_com = true;
|
||||
amdgpu_device_ip_suspend(adev);
|
||||
adev->in_poweroff_reboot_com = false;
|
||||
adev->mp1_state = PP_MP1_STATE_NONE;
|
||||
}
|
||||
|
||||
@ -1312,8 +1314,13 @@ static int amdgpu_pmops_thaw(struct device *dev)
|
||||
static int amdgpu_pmops_poweroff(struct device *dev)
|
||||
{
|
||||
struct drm_device *drm_dev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = drm_to_adev(drm_dev);
|
||||
int r;
|
||||
|
||||
return amdgpu_device_suspend(drm_dev, true);
|
||||
adev->in_poweroff_reboot_com = true;
|
||||
r = amdgpu_device_suspend(drm_dev, true);
|
||||
adev->in_poweroff_reboot_com = false;
|
||||
return r;
|
||||
}
|
||||
|
||||
static int amdgpu_pmops_restore(struct device *dev)
|
||||
|
@ -937,7 +937,49 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
|
||||
|
||||
}
|
||||
#endif
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
static void event_mall_stutter(struct work_struct *work)
|
||||
{
|
||||
|
||||
struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
|
||||
struct amdgpu_display_manager *dm = vblank_work->dm;
|
||||
|
||||
mutex_lock(&dm->dc_lock);
|
||||
|
||||
if (vblank_work->enable)
|
||||
dm->active_vblank_irq_count++;
|
||||
else
|
||||
dm->active_vblank_irq_count--;
|
||||
|
||||
|
||||
dc_allow_idle_optimizations(
|
||||
dm->dc, dm->active_vblank_irq_count == 0 ? true : false);
|
||||
|
||||
DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
|
||||
|
||||
|
||||
mutex_unlock(&dm->dc_lock);
|
||||
}
|
||||
|
||||
static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
|
||||
{
|
||||
|
||||
int max_caps = dc->caps.max_links;
|
||||
struct vblank_workqueue *vblank_work;
|
||||
int i = 0;
|
||||
|
||||
vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
|
||||
if (ZERO_OR_NULL_PTR(vblank_work)) {
|
||||
kfree(vblank_work);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for (i = 0; i < max_caps; i++)
|
||||
INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
|
||||
|
||||
return vblank_work;
|
||||
}
|
||||
#endif
|
||||
static int amdgpu_dm_init(struct amdgpu_device *adev)
|
||||
{
|
||||
struct dc_init_data init_data;
|
||||
@ -957,6 +999,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
|
||||
|
||||
mutex_init(&adev->dm.dc_lock);
|
||||
mutex_init(&adev->dm.audio_lock);
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
spin_lock_init(&adev->dm.vblank_lock);
|
||||
#endif
|
||||
|
||||
if(amdgpu_dm_irq_init(adev)) {
|
||||
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
|
||||
@ -1071,6 +1116,17 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
|
||||
|
||||
amdgpu_dm_init_color_mod();
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
if (adev->dm.dc->caps.max_links > 0) {
|
||||
adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
|
||||
|
||||
if (!adev->dm.vblank_workqueue)
|
||||
DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
|
||||
else
|
||||
DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_DRM_AMD_DC_HDCP
|
||||
if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
|
||||
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
|
||||
@ -1936,7 +1992,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
|
||||
dc_commit_updates_for_stream(
|
||||
dm->dc, bundle->surface_updates,
|
||||
dc_state->stream_status->plane_count,
|
||||
dc_state->streams[k], &bundle->stream_update);
|
||||
dc_state->streams[k], &bundle->stream_update, dc_state);
|
||||
}
|
||||
|
||||
cleanup:
|
||||
@ -1967,7 +2023,8 @@ static void dm_set_dpms_off(struct dc_link *link)
|
||||
|
||||
stream_update.stream = stream_state;
|
||||
dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
|
||||
stream_state, &stream_update);
|
||||
stream_state, &stream_update,
|
||||
stream_state->ctx->dc->current_state);
|
||||
mutex_unlock(&adev->dm.dc_lock);
|
||||
}
|
||||
|
||||
@ -5374,7 +5431,10 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
|
||||
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
|
||||
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
|
||||
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
struct amdgpu_display_manager *dm = &adev->dm;
|
||||
unsigned long flags;
|
||||
#endif
|
||||
int rc = 0;
|
||||
|
||||
if (enable) {
|
||||
@ -5397,22 +5457,15 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
|
||||
if (amdgpu_in_reset(adev))
|
||||
return 0;
|
||||
|
||||
mutex_lock(&dm->dc_lock);
|
||||
|
||||
if (enable)
|
||||
dm->active_vblank_irq_count++;
|
||||
else
|
||||
dm->active_vblank_irq_count--;
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
dc_allow_idle_optimizations(
|
||||
adev->dm.dc, dm->active_vblank_irq_count == 0 ? true : false);
|
||||
|
||||
DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
|
||||
spin_lock_irqsave(&dm->vblank_lock, flags);
|
||||
dm->vblank_workqueue->dm = dm;
|
||||
dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
|
||||
dm->vblank_workqueue->enable = enable;
|
||||
spin_unlock_irqrestore(&dm->vblank_lock, flags);
|
||||
schedule_work(&dm->vblank_workqueue->mall_work);
|
||||
#endif
|
||||
|
||||
mutex_unlock(&dm->dc_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -7663,7 +7716,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
|
||||
struct drm_crtc *pcrtc,
|
||||
bool wait_for_vblank)
|
||||
{
|
||||
int i;
|
||||
uint32_t i;
|
||||
uint64_t timestamp_ns;
|
||||
struct drm_plane *plane;
|
||||
struct drm_plane_state *old_plane_state, *new_plane_state;
|
||||
@ -7704,7 +7757,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
|
||||
amdgpu_dm_commit_cursors(state);
|
||||
|
||||
/* update planes when needed */
|
||||
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
|
||||
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
|
||||
struct drm_crtc *crtc = new_plane_state->crtc;
|
||||
struct drm_crtc_state *new_crtc_state;
|
||||
struct drm_framebuffer *fb = new_plane_state->fb;
|
||||
@ -7927,7 +7980,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
|
||||
bundle->surface_updates,
|
||||
planes_count,
|
||||
acrtc_state->stream,
|
||||
&bundle->stream_update);
|
||||
&bundle->stream_update,
|
||||
dc_state);
|
||||
|
||||
/**
|
||||
* Enable or disable the interrupts on the backend.
|
||||
@ -8263,13 +8317,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
|
||||
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
|
||||
struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
|
||||
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
|
||||
struct dc_surface_update surface_updates[MAX_SURFACES];
|
||||
struct dc_surface_update dummy_updates[MAX_SURFACES];
|
||||
struct dc_stream_update stream_update;
|
||||
struct dc_info_packet hdr_packet;
|
||||
struct dc_stream_status *status = NULL;
|
||||
bool abm_changed, hdr_changed, scaling_changed;
|
||||
|
||||
memset(&surface_updates, 0, sizeof(surface_updates));
|
||||
memset(&dummy_updates, 0, sizeof(dummy_updates));
|
||||
memset(&stream_update, 0, sizeof(stream_update));
|
||||
|
||||
if (acrtc) {
|
||||
@ -8326,15 +8380,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
|
||||
* To fix this, DC should permit updating only stream properties.
|
||||
*/
|
||||
for (j = 0; j < status->plane_count; j++)
|
||||
surface_updates[j].surface = status->plane_states[j];
|
||||
dummy_updates[j].surface = status->plane_states[0];
|
||||
|
||||
|
||||
mutex_lock(&dm->dc_lock);
|
||||
dc_commit_updates_for_stream(dm->dc,
|
||||
surface_updates,
|
||||
dummy_updates,
|
||||
status->plane_count,
|
||||
dm_new_crtc_state->stream,
|
||||
&stream_update);
|
||||
&stream_update,
|
||||
dc_state);
|
||||
mutex_unlock(&dm->dc_lock);
|
||||
}
|
||||
|
||||
|
@ -92,6 +92,20 @@ struct dm_compressor_info {
|
||||
uint64_t gpu_addr;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct vblank_workqueue - Works to be executed in a separate thread during vblank
|
||||
* @mall_work: work for mall stutter
|
||||
* @dm: amdgpu display manager device
|
||||
* @otg_inst: otg instance of which vblank is being set
|
||||
* @enable: true if enable vblank
|
||||
*/
|
||||
struct vblank_workqueue {
|
||||
struct work_struct mall_work;
|
||||
struct amdgpu_display_manager *dm;
|
||||
int otg_inst;
|
||||
bool enable;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct amdgpu_dm_backlight_caps - Information about backlight
|
||||
*
|
||||
@ -243,6 +257,15 @@ struct amdgpu_display_manager {
|
||||
*/
|
||||
struct mutex audio_lock;
|
||||
|
||||
/**
|
||||
* @vblank_work_lock:
|
||||
*
|
||||
* Guards access to deferred vblank work state.
|
||||
*/
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
spinlock_t vblank_lock;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @audio_component:
|
||||
*
|
||||
@ -321,6 +344,10 @@ struct amdgpu_display_manager {
|
||||
struct hdcp_workqueue *hdcp_workqueue;
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
struct vblank_workqueue *vblank_workqueue;
|
||||
#endif
|
||||
|
||||
struct drm_atomic_state *cached_state;
|
||||
struct dc_state *cached_dc_state;
|
||||
|
||||
|
@ -2697,7 +2697,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
|
||||
struct dc_surface_update *srf_updates,
|
||||
int surface_count,
|
||||
struct dc_stream_state *stream,
|
||||
struct dc_stream_update *stream_update)
|
||||
struct dc_stream_update *stream_update,
|
||||
struct dc_state *state)
|
||||
{
|
||||
const struct dc_stream_status *stream_status;
|
||||
enum surface_update_type update_type;
|
||||
@ -2716,12 +2717,6 @@ void dc_commit_updates_for_stream(struct dc *dc,
|
||||
|
||||
|
||||
if (update_type >= UPDATE_TYPE_FULL) {
|
||||
struct dc_plane_state *new_planes[MAX_SURFACES];
|
||||
|
||||
memset(new_planes, 0, sizeof(new_planes));
|
||||
|
||||
for (i = 0; i < surface_count; i++)
|
||||
new_planes[i] = srf_updates[i].surface;
|
||||
|
||||
/* initialize scratch memory for building context */
|
||||
context = dc_create_state(dc);
|
||||
@ -2730,21 +2725,15 @@ void dc_commit_updates_for_stream(struct dc *dc,
|
||||
return;
|
||||
}
|
||||
|
||||
dc_resource_state_copy_construct(
|
||||
dc->current_state, context);
|
||||
dc_resource_state_copy_construct(state, context);
|
||||
|
||||
/*remove old surfaces from context */
|
||||
if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
|
||||
DC_ERROR("Failed to remove streams for new validate context!\n");
|
||||
return;
|
||||
for (i = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
|
||||
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
|
||||
|
||||
if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
|
||||
new_pipe->plane_state->force_full_update = true;
|
||||
}
|
||||
|
||||
/* add surface to context */
|
||||
if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
|
||||
DC_ERROR("Failed to add streams for new validate context!\n");
|
||||
return;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
@ -294,7 +294,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
|
||||
struct dc_surface_update *srf_updates,
|
||||
int surface_count,
|
||||
struct dc_stream_state *stream,
|
||||
struct dc_stream_update *stream_update);
|
||||
struct dc_stream_update *stream_update,
|
||||
struct dc_state *state);
|
||||
/*
|
||||
* Log the current stream state.
|
||||
*/
|
||||
|
@ -480,7 +480,6 @@ unsigned int dcn10_get_dig_frontend(struct link_encoder *enc)
|
||||
break;
|
||||
default:
|
||||
// invalid source select DIG
|
||||
ASSERT(false);
|
||||
result = ENGINE_ID_UNKNOWN;
|
||||
}
|
||||
|
||||
|
@ -539,6 +539,8 @@ void dcn30_init_hw(struct dc *dc)
|
||||
|
||||
fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
|
||||
dc->links[i]->link_enc);
|
||||
if (fe == ENGINE_ID_UNKNOWN)
|
||||
continue;
|
||||
|
||||
for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
|
||||
if (fe == dc->res_pool->stream_enc[j]->id) {
|
||||
|
@ -168,6 +168,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
|
||||
.ack = NULL
|
||||
};
|
||||
|
||||
static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
|
||||
.set = NULL,
|
||||
.ack = NULL
|
||||
};
|
||||
|
||||
#undef BASE_INNER
|
||||
#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
|
||||
|
||||
@ -230,6 +235,17 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
|
||||
.funcs = &vblank_irq_info_funcs\
|
||||
}
|
||||
|
||||
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
|
||||
* of DCE's DC_IRQ_SOURCE_VUPDATEx.
|
||||
*/
|
||||
#define vupdate_no_lock_int_entry(reg_num)\
|
||||
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
|
||||
IRQ_REG_ENTRY(OTG, reg_num,\
|
||||
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
|
||||
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
|
||||
.funcs = &vupdate_no_lock_irq_info_funcs\
|
||||
}
|
||||
|
||||
#define vblank_int_entry(reg_num)\
|
||||
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
|
||||
IRQ_REG_ENTRY(OTG, reg_num,\
|
||||
@ -338,6 +354,12 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = {
|
||||
vupdate_int_entry(3),
|
||||
vupdate_int_entry(4),
|
||||
vupdate_int_entry(5),
|
||||
vupdate_no_lock_int_entry(0),
|
||||
vupdate_no_lock_int_entry(1),
|
||||
vupdate_no_lock_int_entry(2),
|
||||
vupdate_no_lock_int_entry(3),
|
||||
vupdate_no_lock_int_entry(4),
|
||||
vupdate_no_lock_int_entry(5),
|
||||
vblank_int_entry(0),
|
||||
vblank_int_entry(1),
|
||||
vblank_int_entry(2),
|
||||
|
@@ -762,7 +762,7 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
		structure_size = sizeof(struct gpu_metrics_v2_0);
		break;
	default:
		break;
		return;
	}

#undef METRICS_VERSION

@ -774,6 +774,72 @@ void drm_event_cancel_free(struct drm_device *dev,
|
||||
}
|
||||
EXPORT_SYMBOL(drm_event_cancel_free);
|
||||
|
||||
/**
|
||||
* drm_send_event_helper - send DRM event to file descriptor
|
||||
* @dev: DRM device
|
||||
* @e: DRM event to deliver
|
||||
* @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
|
||||
* time domain
|
||||
*
|
||||
* This helper function sends the event @e, initialized with
|
||||
* drm_event_reserve_init(), to its associated userspace DRM file.
|
||||
* The timestamp variant of dma_fence_signal is used when the caller
|
||||
* sends a valid timestamp.
|
||||
*/
|
||||
void drm_send_event_helper(struct drm_device *dev,
|
||||
struct drm_pending_event *e, ktime_t timestamp)
|
||||
{
|
||||
assert_spin_locked(&dev->event_lock);
|
||||
|
||||
if (e->completion) {
|
||||
complete_all(e->completion);
|
||||
e->completion_release(e->completion);
|
||||
e->completion = NULL;
|
||||
}
|
||||
|
||||
if (e->fence) {
|
||||
if (timestamp)
|
||||
dma_fence_signal_timestamp(e->fence, timestamp);
|
||||
else
|
||||
dma_fence_signal(e->fence);
|
||||
dma_fence_put(e->fence);
|
||||
}
|
||||
|
||||
if (!e->file_priv) {
|
||||
kfree(e);
|
||||
return;
|
||||
}
|
||||
|
||||
list_del(&e->pending_link);
|
||||
list_add_tail(&e->link,
|
||||
&e->file_priv->event_list);
|
||||
wake_up_interruptible_poll(&e->file_priv->event_wait,
|
||||
EPOLLIN | EPOLLRDNORM);
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_send_event_timestamp_locked - send DRM event to file descriptor
|
||||
* @dev: DRM device
|
||||
* @e: DRM event to deliver
|
||||
* @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
|
||||
* time domain
|
||||
*
|
||||
* This function sends the event @e, initialized with drm_event_reserve_init(),
|
||||
* to its associated userspace DRM file. Callers must already hold
|
||||
* &drm_device.event_lock.
|
||||
*
|
||||
* Note that the core will take care of unlinking and disarming events when the
|
||||
* corresponding DRM file is closed. Drivers need not worry about whether the
|
||||
* DRM file for this event still exists and can call this function upon
|
||||
* completion of the asynchronous work unconditionally.
|
||||
*/
|
||||
void drm_send_event_timestamp_locked(struct drm_device *dev,
|
||||
struct drm_pending_event *e, ktime_t timestamp)
|
||||
{
|
||||
drm_send_event_helper(dev, e, timestamp);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_send_event_timestamp_locked);
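For a driver that captures the vblank time in hardware, the new helper lets the pending event carry that time instead of the delivery time. A hedged usage sketch with invented foo_* names (the in-tree user is send_vblank_event() in drm_vblank.c, shown later in this diff):

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_vblank.h>

struct foo_crtc {					/* hypothetical driver state */
	struct drm_pending_vblank_event *flip_event;
};

static void foo_finish_page_flip(struct drm_device *dev, struct foo_crtc *crtc,
				 ktime_t hw_vblank_time)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (crtc->flip_event) {
		/* Deliver the event stamped with the HW-captured vblank time. */
		drm_send_event_timestamp_locked(dev, &crtc->flip_event->base,
						hw_vblank_time);
		crtc->flip_event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}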
|
||||
|
||||
/**
|
||||
* drm_send_event_locked - send DRM event to file descriptor
|
||||
* @dev: DRM device
|
||||
@ -790,29 +856,7 @@ EXPORT_SYMBOL(drm_event_cancel_free);
|
||||
*/
|
||||
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
|
||||
{
|
||||
assert_spin_locked(&dev->event_lock);
|
||||
|
||||
if (e->completion) {
|
||||
complete_all(e->completion);
|
||||
e->completion_release(e->completion);
|
||||
e->completion = NULL;
|
||||
}
|
||||
|
||||
if (e->fence) {
|
||||
dma_fence_signal(e->fence);
|
||||
dma_fence_put(e->fence);
|
||||
}
|
||||
|
||||
if (!e->file_priv) {
|
||||
kfree(e);
|
||||
return;
|
||||
}
|
||||
|
||||
list_del(&e->pending_link);
|
||||
list_add_tail(&e->link,
|
||||
&e->file_priv->event_list);
|
||||
wake_up_interruptible_poll(&e->file_priv->event_wait,
|
||||
EPOLLIN | EPOLLRDNORM);
|
||||
drm_send_event_helper(dev, e, 0);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_send_event_locked);
|
||||
|
||||
@ -836,7 +880,7 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
|
||||
unsigned long irqflags;
|
||||
|
||||
spin_lock_irqsave(&dev->event_lock, irqflags);
|
||||
drm_send_event_locked(dev, e);
|
||||
drm_send_event_helper(dev, e, 0);
|
||||
spin_unlock_irqrestore(&dev->event_lock, irqflags);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_send_event);
|
||||
|
@ -1006,7 +1006,14 @@ static void send_vblank_event(struct drm_device *dev,
|
||||
break;
|
||||
}
|
||||
trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe, seq);
|
||||
drm_send_event_locked(dev, &e->base);
|
||||
/*
|
||||
* Use the same timestamp for any associated fence signal to avoid
|
||||
* mismatch in timestamps for vsync & fence events triggered by the
|
||||
* same HW event. Frameworks like SurfaceFlinger in Android expects the
|
||||
* retire-fence timestamp to match exactly with HW vsync as it uses it
|
||||
* for its software vsync modeling.
|
||||
*/
|
||||
drm_send_event_timestamp_locked(dev, &e->base, now);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -21,7 +21,6 @@ subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
subdir-ccflags-y += $(call cc-disable-warning, uninitialized)
subdir-ccflags-y += $(call cc-disable-warning, frame-address)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror

@ -109,7 +109,6 @@ void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
|
||||
crtc_state->cpu_transcoder = INVALID_TRANSCODER;
|
||||
crtc_state->master_transcoder = INVALID_TRANSCODER;
|
||||
crtc_state->hsw_workaround_pipe = INVALID_PIPE;
|
||||
crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
|
||||
crtc_state->scaler_state.scaler_id = -1;
|
||||
crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
|
||||
}
|
||||
|
@ -10211,7 +10211,6 @@ static void snprintf_output_types(char *buf, size_t len,
|
||||
}
|
||||
|
||||
static const char * const output_format_str[] = {
|
||||
[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
|
||||
[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
|
||||
[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
|
||||
[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
|
||||
@ -10220,7 +10219,7 @@ static const char * const output_format_str[] = {
|
||||
static const char *output_formats(enum intel_output_format format)
|
||||
{
|
||||
if (format >= ARRAY_SIZE(output_format_str))
|
||||
format = INTEL_OUTPUT_FORMAT_INVALID;
|
||||
return "invalid";
|
||||
return output_format_str[format];
|
||||
}
|
||||
|
||||
|
@ -830,7 +830,6 @@ struct intel_crtc_wm_state {
|
||||
};
|
||||
|
||||
enum intel_output_format {
|
||||
INTEL_OUTPUT_FORMAT_INVALID,
|
||||
INTEL_OUTPUT_FORMAT_RGB,
|
||||
INTEL_OUTPUT_FORMAT_YCBCR420,
|
||||
INTEL_OUTPUT_FORMAT_YCBCR444,
|
||||
|
@ -41,6 +41,7 @@
|
||||
#include "gt/intel_lrc.h"
|
||||
#include "gt/intel_ring.h"
|
||||
#include "gt/intel_gt_requests.h"
|
||||
#include "gt/shmem_utils.h"
|
||||
#include "gvt.h"
|
||||
#include "i915_pvinfo.h"
|
||||
#include "trace.h"
|
||||
@ -3094,71 +3095,28 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
|
||||
*/
|
||||
void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
|
||||
{
|
||||
const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
struct drm_i915_private *dev_priv = gvt->gt->i915;
|
||||
struct intel_engine_cs *engine;
|
||||
enum intel_engine_id id;
|
||||
const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
|
||||
struct i915_request *rq;
|
||||
struct intel_vgpu_submission *s = &vgpu->submission;
|
||||
struct i915_request *requests[I915_NUM_ENGINES] = {};
|
||||
bool is_ctx_pinned[I915_NUM_ENGINES] = {};
|
||||
int ret = 0;
|
||||
|
||||
if (gvt->is_reg_whitelist_updated)
|
||||
return;
|
||||
|
||||
for_each_engine(engine, &dev_priv->gt, id) {
|
||||
ret = intel_context_pin(s->shadow[id]);
|
||||
if (ret) {
|
||||
gvt_vgpu_err("fail to pin shadow ctx\n");
|
||||
goto out;
|
||||
}
|
||||
is_ctx_pinned[id] = true;
|
||||
|
||||
rq = i915_request_create(s->shadow[id]);
|
||||
if (IS_ERR(rq)) {
|
||||
gvt_vgpu_err("fail to alloc default request\n");
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
requests[id] = i915_request_get(rq);
|
||||
i915_request_add(rq);
|
||||
}
|
||||
|
||||
if (intel_gt_wait_for_idle(&dev_priv->gt,
|
||||
I915_GEM_IDLE_TIMEOUT) == -ETIME) {
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* scan init ctx to update cmd accessible list */
|
||||
for_each_engine(engine, &dev_priv->gt, id) {
|
||||
int size = engine->context_size - PAGE_SIZE;
|
||||
void *vaddr;
|
||||
for_each_engine(engine, gvt->gt, id) {
|
||||
struct parser_exec_state s;
|
||||
struct drm_i915_gem_object *obj;
|
||||
struct i915_request *rq;
|
||||
void *vaddr;
|
||||
int ret;
|
||||
|
||||
rq = requests[id];
|
||||
GEM_BUG_ON(!i915_request_completed(rq));
|
||||
GEM_BUG_ON(!intel_context_is_pinned(rq->context));
|
||||
obj = rq->context->state->obj;
|
||||
if (!engine->default_state)
|
||||
continue;
|
||||
|
||||
if (!obj) {
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
i915_gem_object_set_cache_coherency(obj,
|
||||
I915_CACHE_LLC);
|
||||
|
||||
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
|
||||
vaddr = shmem_pin_map(engine->default_state);
|
||||
if (IS_ERR(vaddr)) {
|
||||
gvt_err("failed to pin init ctx obj, ring=%d, err=%lx\n",
|
||||
id, PTR_ERR(vaddr));
|
||||
ret = PTR_ERR(vaddr);
|
||||
goto out;
|
||||
gvt_err("failed to map %s->default state, err:%zd\n",
|
||||
engine->name, PTR_ERR(vaddr));
|
||||
return;
|
||||
}
|
||||
|
||||
s.buf_type = RING_BUFFER_CTX;
|
||||
@ -3166,9 +3124,9 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
|
||||
s.vgpu = vgpu;
|
||||
s.engine = engine;
|
||||
s.ring_start = 0;
|
||||
s.ring_size = size;
|
||||
s.ring_size = engine->context_size - start;
|
||||
s.ring_head = 0;
|
||||
s.ring_tail = size;
|
||||
s.ring_tail = s.ring_size;
|
||||
s.rb_va = vaddr + start;
|
||||
s.workload = NULL;
|
||||
s.is_ctx_wa = false;
|
||||
@ -3176,29 +3134,18 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
|
||||
|
||||
/* skipping the first RING_CTX_SIZE(0x50) dwords */
|
||||
ret = ip_gma_set(&s, RING_CTX_SIZE);
|
||||
if (ret) {
|
||||
i915_gem_object_unpin_map(obj);
|
||||
goto out;
|
||||
if (ret == 0) {
|
||||
ret = command_scan(&s, 0, s.ring_size, 0, s.ring_size);
|
||||
if (ret)
|
||||
gvt_err("Scan init ctx error\n");
|
||||
}
|
||||
|
||||
ret = command_scan(&s, 0, size, 0, size);
|
||||
shmem_unpin_map(engine->default_state, vaddr);
|
||||
if (ret)
|
||||
gvt_err("Scan init ctx error\n");
|
||||
|
||||
i915_gem_object_unpin_map(obj);
|
||||
return;
|
||||
}
|
||||
|
||||
out:
|
||||
if (!ret)
|
||||
gvt->is_reg_whitelist_updated = true;
|
||||
|
||||
for (id = 0; id < I915_NUM_ENGINES ; id++) {
|
||||
if (requests[id])
|
||||
i915_request_put(requests[id]);
|
||||
|
||||
if (is_ctx_pinned[id])
|
||||
intel_context_unpin(s->shadow[id]);
|
||||
}
|
||||
gvt->is_reg_whitelist_updated = true;
|
||||
}
|
||||
|
||||
int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload)
|
||||
|
@ -522,12 +522,11 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu,
|
||||
static void clean_execlist(struct intel_vgpu *vgpu,
|
||||
intel_engine_mask_t engine_mask)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
|
||||
struct intel_engine_cs *engine;
|
||||
struct intel_vgpu_submission *s = &vgpu->submission;
|
||||
struct intel_engine_cs *engine;
|
||||
intel_engine_mask_t tmp;
|
||||
|
||||
for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
|
||||
for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
|
||||
kfree(s->ring_scan_buffer[engine->id]);
|
||||
s->ring_scan_buffer[engine->id] = NULL;
|
||||
s->ring_scan_buffer_size[engine->id] = 0;
|
||||
@ -537,11 +536,10 @@ static void clean_execlist(struct intel_vgpu *vgpu,
|
||||
static void reset_execlist(struct intel_vgpu *vgpu,
|
||||
intel_engine_mask_t engine_mask)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
|
||||
struct intel_engine_cs *engine;
|
||||
intel_engine_mask_t tmp;
|
||||
|
||||
for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
|
||||
for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp)
|
||||
init_vgpu_execlist(vgpu, engine);
|
||||
}
|
||||
|
||||
|
@ -412,7 +412,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
|
||||
if (!wa_ctx->indirect_ctx.obj)
|
||||
return;
|
||||
|
||||
i915_gem_object_lock(wa_ctx->indirect_ctx.obj, NULL);
|
||||
i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
|
||||
i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
|
||||
i915_gem_object_put(wa_ctx->indirect_ctx.obj);
|
||||
|
||||
wa_ctx->indirect_ctx.obj = NULL;
|
||||
@ -520,6 +522,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
|
||||
struct intel_gvt *gvt = workload->vgpu->gvt;
|
||||
const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
|
||||
struct intel_vgpu_shadow_bb *bb;
|
||||
struct i915_gem_ww_ctx ww;
|
||||
int ret;
|
||||
|
||||
list_for_each_entry(bb, &workload->shadow_bb, list) {
|
||||
@ -544,10 +547,19 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
|
||||
* directly
|
||||
*/
|
||||
if (!bb->ppgtt) {
|
||||
bb->vma = i915_gem_object_ggtt_pin(bb->obj,
|
||||
NULL, 0, 0, 0);
|
||||
i915_gem_ww_ctx_init(&ww, false);
|
||||
retry:
|
||||
i915_gem_object_lock(bb->obj, &ww);
|
||||
|
||||
bb->vma = i915_gem_object_ggtt_pin_ww(bb->obj, &ww,
|
||||
NULL, 0, 0, 0);
|
||||
if (IS_ERR(bb->vma)) {
|
||||
ret = PTR_ERR(bb->vma);
|
||||
if (ret == -EDEADLK) {
|
||||
ret = i915_gem_ww_ctx_backoff(&ww);
|
||||
if (!ret)
|
||||
goto retry;
|
||||
}
|
||||
goto err;
|
||||
}
|
||||
|
||||
@ -561,13 +573,15 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
|
||||
0);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
|
||||
/* No one is going to touch shadow bb from now on. */
|
||||
i915_gem_object_flush_map(bb->obj);
|
||||
/* No one is going to touch shadow bb from now on. */
|
||||
i915_gem_object_flush_map(bb->obj);
|
||||
i915_gem_object_unlock(bb->obj);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
err:
|
||||
i915_gem_ww_ctx_fini(&ww);
|
||||
release_shadow_batch_buffer(workload);
|
||||
return ret;
|
||||
}
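The per-object locking conversion above follows i915's standard ww-mutex dance: take each object lock under a ww acquire context, and on -EDEADLK back off and retry the whole sequence. Reduced to its skeleton (illustrative only; the foo_* wrapper and the elided pin/map step are not from this patch):

static int foo_pin_with_ww(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, false);	/* false: not interruptible */
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err) {
		/* ... pin/map the object while the ww lock is held ... */
	}
	if (err == -EDEADLK) {
		/* Lost the lock ordering to another context: drop and retry. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}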
|
||||
@ -594,14 +608,29 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
|
||||
unsigned char *per_ctx_va =
|
||||
(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
|
||||
wa_ctx->indirect_ctx.size;
|
||||
struct i915_gem_ww_ctx ww;
|
||||
int ret;
|
||||
|
||||
if (wa_ctx->indirect_ctx.size == 0)
|
||||
return 0;
|
||||
|
||||
vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
|
||||
0, CACHELINE_BYTES, 0);
|
||||
if (IS_ERR(vma))
|
||||
return PTR_ERR(vma);
|
||||
i915_gem_ww_ctx_init(&ww, false);
|
||||
retry:
|
||||
i915_gem_object_lock(wa_ctx->indirect_ctx.obj, &ww);
|
||||
|
||||
vma = i915_gem_object_ggtt_pin_ww(wa_ctx->indirect_ctx.obj, &ww, NULL,
|
||||
0, CACHELINE_BYTES, 0);
|
||||
if (IS_ERR(vma)) {
|
||||
ret = PTR_ERR(vma);
|
||||
if (ret == -EDEADLK) {
|
||||
ret = i915_gem_ww_ctx_backoff(&ww);
|
||||
if (!ret)
|
||||
goto retry;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
|
||||
|
||||
/* FIXME: we are not tracking our pinned VMA leaving it
|
||||
* up to the core to fix up the stray pin_count upon
|
||||
@ -635,12 +664,14 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
|
||||
|
||||
list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
|
||||
if (bb->obj) {
|
||||
i915_gem_object_lock(bb->obj, NULL);
|
||||
if (bb->va && !IS_ERR(bb->va))
|
||||
i915_gem_object_unpin_map(bb->obj);
|
||||
|
||||
if (bb->vma && !IS_ERR(bb->vma))
|
||||
i915_vma_unpin(bb->vma);
|
||||
|
||||
i915_gem_object_unlock(bb->obj);
|
||||
i915_gem_object_put(bb->obj);
|
||||
}
|
||||
list_del(&bb->list);
|
||||
@ -1015,13 +1046,12 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
|
||||
intel_engine_mask_t engine_mask)
|
||||
{
|
||||
struct intel_vgpu_submission *s = &vgpu->submission;
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
|
||||
struct intel_engine_cs *engine;
|
||||
struct intel_vgpu_workload *pos, *n;
|
||||
intel_engine_mask_t tmp;
|
||||
|
||||
/* free the unsubmited workloads in the queues. */
|
||||
for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
|
||||
for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
|
||||
list_for_each_entry_safe(pos, n,
|
||||
&s->workload_q_head[engine->id], list) {
|
||||
list_del_init(&pos->list);
|
||||
|
@ -2367,6 +2367,8 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
|
||||
|
||||
#define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80
|
||||
|
||||
#define REG_A5XX_UCHE_MODE_CNTL 0x00000e81
|
||||
|
||||
#define REG_A5XX_UCHE_SVM_CNTL 0x00000e82
|
||||
|
||||
#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87
|
||||
|
@ -222,7 +222,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
||||
a5xx_preempt_trigger(gpu);
|
||||
}
|
||||
|
||||
static const struct {
|
||||
static const struct adreno_five_hwcg_regs {
|
||||
u32 offset;
|
||||
u32 value;
|
||||
} a5xx_hwcg[] = {
|
||||
@ -318,16 +318,124 @@ static const struct {
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
|
||||
}, a50x_hwcg[] = {
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00FFFFF4},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
|
||||
{REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
|
||||
{REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
|
||||
}, a512_hwcg[] = {
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
|
||||
{REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
|
||||
{REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
|
||||
{REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
|
||||
{REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
|
||||
{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
|
||||
};
|
||||
|
||||
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
|
||||
{
|
||||
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
|
||||
unsigned int i;
|
||||
const struct adreno_five_hwcg_regs *regs;
|
||||
unsigned int i, sz;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
|
||||
gpu_write(gpu, a5xx_hwcg[i].offset,
|
||||
state ? a5xx_hwcg[i].value : 0);
|
||||
if (adreno_is_a508(adreno_gpu)) {
|
||||
regs = a50x_hwcg;
|
||||
sz = ARRAY_SIZE(a50x_hwcg);
|
||||
} else if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) {
|
||||
regs = a512_hwcg;
|
||||
sz = ARRAY_SIZE(a512_hwcg);
|
||||
} else {
|
||||
regs = a5xx_hwcg;
|
||||
sz = ARRAY_SIZE(a5xx_hwcg);
|
||||
}
|
||||
|
||||
for (i = 0; i < sz; i++)
|
||||
gpu_write(gpu, regs[i].offset,
|
||||
state ? regs[i].value : 0);
|
||||
|
||||
if (adreno_is_a540(adreno_gpu)) {
|
||||
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 0x00000770 : 0);
|
||||
@ -538,11 +646,13 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
|
||||
{
|
||||
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
|
||||
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
|
||||
u32 regbit;
|
||||
int ret;
|
||||
|
||||
gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
|
||||
|
||||
if (adreno_is_a540(adreno_gpu))
|
||||
if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
|
||||
adreno_is_a540(adreno_gpu))
|
||||
gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
|
||||
|
||||
/* Make all blocks contribute to the GPU BUSY perf counter */
|
||||
@ -604,29 +714,48 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
|
||||
0x00100000 + adreno_gpu->gmem - 1);
|
||||
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
|
||||
|
||||
if (adreno_is_a510(adreno_gpu)) {
|
||||
if (adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu)) {
|
||||
gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
|
||||
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
|
||||
if (adreno_is_a508(adreno_gpu))
|
||||
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
|
||||
else
|
||||
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
|
||||
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
|
||||
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
|
||||
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
|
||||
(0x200 << 11 | 0x200 << 22));
|
||||
} else {
|
||||
gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
|
||||
if (adreno_is_a530(adreno_gpu))
|
||||
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
|
||||
if (adreno_is_a540(adreno_gpu))
|
||||
else
|
||||
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
|
||||
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
|
||||
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
|
||||
}
|
||||
|
||||
if (adreno_is_a508(adreno_gpu))
|
||||
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
|
||||
(0x100 << 11 | 0x100 << 22));
|
||||
else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) ||
|
||||
adreno_is_a512(adreno_gpu))
|
||||
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
|
||||
(0x200 << 11 | 0x200 << 22));
|
||||
else
|
||||
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
|
||||
(0x400 << 11 | 0x300 << 22));
|
||||
}
|
||||
|
||||
if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
|
||||
gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
|
||||
|
||||
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
|
||||
/*
|
||||
* Disable the RB sampler datapath DP2 clock gating optimization
|
||||
* for 1-SP GPUs, as it is enabled by default.
|
||||
*/
|
||||
if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
|
||||
adreno_is_a512(adreno_gpu))
|
||||
gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9));
|
||||
|
||||
/* Disable UCHE global filter as SP can invalidate/flush independently */
|
||||
gpu_write(gpu, REG_A5XX_UCHE_MODE_CNTL, BIT(29));
|
||||
|
||||
/* Enable USE_RETENTION_FLOPS */
|
||||
gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
|
||||
@ -653,10 +782,20 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
|
||||
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
|
||||
|
||||
/* Set the highest bank bit */
|
||||
gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7);
|
||||
gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1);
|
||||
if (adreno_is_a540(adreno_gpu))
|
||||
gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, 2);
|
||||
regbit = 2;
|
||||
else
|
||||
regbit = 1;
|
||||
|
||||
gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7);
|
||||
gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1);
|
||||
|
||||
if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
|
||||
adreno_is_a540(adreno_gpu))
|
||||
gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit);
|
||||
|
||||
/* Disable All flat shading optimization (ALLFLATOPTDIS) */
|
||||
gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10));
|
||||
|
||||
/* Protect registers from the CP */
|
||||
gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
|
||||
@ -688,12 +827,14 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
|
||||
|
||||
/* VPC */
|
||||
gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
|
||||
gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));
|
||||
gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 16));
|
||||
|
||||
/* UCHE */
|
||||
gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
|
||||
|
||||
if (adreno_is_a530(adreno_gpu) || adreno_is_a510(adreno_gpu))
|
||||
if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
|
||||
adreno_is_a510(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
|
||||
adreno_is_a530(adreno_gpu))
|
||||
gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
|
||||
ADRENO_PROTECT_RW(0x10000, 0x8000));
|
||||
|
||||
@ -735,7 +876,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (!adreno_is_a510(adreno_gpu))
|
||||
if (!(adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
|
||||
adreno_is_a510(adreno_gpu) || adreno_is_a512(adreno_gpu)))
|
||||
a5xx_gpmu_ucode_init(gpu);
|
||||
|
||||
ret = a5xx_ucode_init(gpu);
|
||||
@ -1168,7 +1310,8 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (adreno_is_a510(adreno_gpu)) {
|
||||
/* Adreno 508, 509, 510, 512 need manual RBBM sus/res control */
|
||||
if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) {
|
||||
/* Halt the sp_input_clk at HM level */
|
||||
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
|
||||
a5xx_set_hwcg(gpu, true);
|
||||
@ -1210,8 +1353,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
|
||||
u32 mask = 0xf;
|
||||
int i, ret;
|
||||
|
||||
/* A510 has 3 XIN ports in VBIF */
|
||||
if (adreno_is_a510(adreno_gpu))
|
||||
/* A508, A510 have 3 XIN ports in VBIF */
|
||||
if (adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu))
|
||||
mask = 0x7;
|
||||
|
||||
/* Clear the VBIF pipe before shutting down */
|
||||
@ -1223,10 +1366,12 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
|
||||
|
||||
/*
|
||||
* Reset the VBIF before power collapse to avoid issue with FIFO
|
||||
* entries
|
||||
* entries on Adreno A510 and A530 (the others will tend to lock up)
|
||||
*/
|
||||
gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
|
||||
gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
|
||||
if (adreno_is_a510(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
|
||||
gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
|
||||
gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
|
||||
}
|
||||
|
||||
ret = msm_gpu_pm_suspend(gpu);
|
||||
if (ret)
|
||||
|
@ -298,7 +298,7 @@ int a5xx_power_init(struct msm_gpu *gpu)
|
||||
int ret;
|
||||
|
||||
/* Not all A5xx chips have a GPMU */
|
||||
if (adreno_is_a510(adreno_gpu))
|
||||
if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)))
|
||||
return 0;
|
||||
|
||||
/* Set up the limits management */
|
||||
@ -330,7 +330,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
|
||||
unsigned int *data, *ptr, *cmds;
|
||||
unsigned int cmds_size;
|
||||
|
||||
if (adreno_is_a510(adreno_gpu))
|
||||
if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)))
|
||||
return;
|
||||
|
||||
if (a5xx_gpu->gpmu_bo)
|
||||
|
@ -245,37 +245,66 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct a6xx_gmu_oob_bits {
|
||||
int set, ack, set_new, ack_new;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
/* These are the interrupt / ack bits for each OOB request that are set
|
||||
* in a6xx_gmu_set_oob and a6xx_gmu_clear_oob
|
||||
*/
|
||||
static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
|
||||
[GMU_OOB_GPU_SET] = {
|
||||
.name = "GPU_SET",
|
||||
.set = 16,
|
||||
.ack = 24,
|
||||
.set_new = 30,
|
||||
.ack_new = 31,
|
||||
},
|
||||
|
||||
[GMU_OOB_PERFCOUNTER_SET] = {
|
||||
.name = "PERFCOUNTER",
|
||||
.set = 17,
|
||||
.ack = 25,
|
||||
.set_new = 28,
|
||||
.ack_new = 30,
|
||||
},
|
||||
|
||||
[GMU_OOB_BOOT_SLUMBER] = {
|
||||
.name = "BOOT_SLUMBER",
|
||||
.set = 22,
|
||||
.ack = 30,
|
||||
},
|
||||
|
||||
[GMU_OOB_DCVS_SET] = {
|
||||
.name = "GPU_DCVS",
|
||||
.set = 23,
|
||||
.ack = 31,
|
||||
},
|
||||
};
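/*
 * A quick reading of the table above (for illustration only): for
 * GMU_OOB_PERFCOUNTER_SET a legacy GMU is poked via request bit 17 and
 * acked on bit 25, while newer firmware uses bits 28 and 30.
 * BOOT_SLUMBER and DCVS have no *_new entries, so on non-legacy
 * firmware a6xx_gmu_set_oob() rejects them with -EINVAL rather than
 * writing an undefined bit.
 */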
|
||||
|
||||
/* Trigger an OOB (out of band) request to the GMU */
|
||||
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
|
||||
{
|
||||
int ret;
|
||||
u32 val;
|
||||
int request, ack;
|
||||
const char *name;
|
||||
|
||||
switch (state) {
|
||||
case GMU_OOB_GPU_SET:
|
||||
if (gmu->legacy) {
|
||||
request = GMU_OOB_GPU_SET_REQUEST;
|
||||
ack = GMU_OOB_GPU_SET_ACK;
|
||||
} else {
|
||||
request = GMU_OOB_GPU_SET_REQUEST_NEW;
|
||||
ack = GMU_OOB_GPU_SET_ACK_NEW;
|
||||
}
|
||||
name = "GPU_SET";
|
||||
break;
|
||||
case GMU_OOB_BOOT_SLUMBER:
|
||||
request = GMU_OOB_BOOT_SLUMBER_REQUEST;
|
||||
ack = GMU_OOB_BOOT_SLUMBER_ACK;
|
||||
name = "BOOT_SLUMBER";
|
||||
break;
|
||||
case GMU_OOB_DCVS_SET:
|
||||
request = GMU_OOB_DCVS_REQUEST;
|
||||
ack = GMU_OOB_DCVS_ACK;
|
||||
name = "GPU_DCVS";
|
||||
break;
|
||||
default:
|
||||
if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
|
||||
return -EINVAL;
|
||||
|
||||
if (gmu->legacy) {
|
||||
request = a6xx_gmu_oob_bits[state].set;
|
||||
ack = a6xx_gmu_oob_bits[state].ack;
|
||||
} else {
|
||||
request = a6xx_gmu_oob_bits[state].set_new;
|
||||
ack = a6xx_gmu_oob_bits[state].ack_new;
|
||||
if (!request || !ack) {
|
||||
DRM_DEV_ERROR(gmu->dev,
|
||||
"Invalid non-legacy GMU request %s\n",
|
||||
a6xx_gmu_oob_bits[state].name);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Trigger the requested OOB operation */
|
||||
@ -288,7 +317,7 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
|
||||
if (ret)
|
||||
DRM_DEV_ERROR(gmu->dev,
|
||||
"Timeout waiting for GMU OOB set %s: 0x%x\n",
|
||||
name,
|
||||
a6xx_gmu_oob_bits[state].name,
|
||||
gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
|
||||
|
||||
/* Clear the acknowledge interrupt */
|
||||
@ -300,27 +329,17 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
|
||||
/* Clear a pending OOB state in the GMU */
|
||||
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
|
||||
{
|
||||
if (!gmu->legacy) {
|
||||
WARN_ON(state != GMU_OOB_GPU_SET);
|
||||
gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
|
||||
1 << GMU_OOB_GPU_SET_CLEAR_NEW);
|
||||
return;
|
||||
}
|
||||
int bit;
|
||||
|
||||
switch (state) {
|
||||
case GMU_OOB_GPU_SET:
|
||||
gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
|
||||
1 << GMU_OOB_GPU_SET_CLEAR);
|
||||
break;
|
||||
case GMU_OOB_BOOT_SLUMBER:
|
||||
gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
|
||||
1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
|
||||
break;
|
||||
case GMU_OOB_DCVS_SET:
|
||||
gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
|
||||
1 << GMU_OOB_DCVS_CLEAR);
|
||||
break;
|
||||
}
|
||||
if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
|
||||
return;
|
||||
|
||||
if (gmu->legacy)
|
||||
bit = a6xx_gmu_oob_bits[state].ack;
|
||||
else
|
||||
bit = a6xx_gmu_oob_bits[state].ack_new;
|
||||
|
||||
gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, bit);
|
||||
}
|
||||
|
||||
/* Enable CPU control of SPTP power collapse */
|
||||
|
@ -153,44 +153,27 @@ static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
|
||||
*/
|
||||
|
||||
enum a6xx_gmu_oob_state {
|
||||
/*
|
||||
* Let the GMU know that a boot or slumber operation has started. The value in
|
||||
* REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are
|
||||
* doing
|
||||
*/
|
||||
GMU_OOB_BOOT_SLUMBER = 0,
|
||||
/*
|
||||
* Let the GMU know to not turn off any GPU registers while the CPU is in a
|
||||
* critical section
|
||||
*/
|
||||
GMU_OOB_GPU_SET,
|
||||
/*
|
||||
* Set a new power level for the GPU when the CPU is doing frequency scaling
|
||||
*/
|
||||
GMU_OOB_DCVS_SET,
|
||||
/*
|
||||
* Used to keep the GPU on for CPU-side reads of performance counters.
|
||||
*/
|
||||
GMU_OOB_PERFCOUNTER_SET,
|
||||
};
|
||||
|
||||
/* These are the interrupt / ack bits for each OOB request that are set
|
||||
* in a6xx_gmu_set_oob and a6xx_clear_oob
|
||||
*/
|
||||
|
||||
/*
|
||||
* Let the GMU know that a boot or slumber operation has started. The value in
|
||||
* REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are
|
||||
* doing
|
||||
*/
|
||||
#define GMU_OOB_BOOT_SLUMBER_REQUEST 22
|
||||
#define GMU_OOB_BOOT_SLUMBER_ACK 30
|
||||
#define GMU_OOB_BOOT_SLUMBER_CLEAR 30
|
||||
|
||||
/*
|
||||
* Set a new power level for the GPU when the CPU is doing frequency scaling
|
||||
*/
|
||||
#define GMU_OOB_DCVS_REQUEST 23
|
||||
#define GMU_OOB_DCVS_ACK 31
|
||||
#define GMU_OOB_DCVS_CLEAR 31
|
||||
|
||||
/*
|
||||
* Let the GMU know to not turn off any GPU registers while the CPU is in a
|
||||
* critical section
|
||||
*/
|
||||
#define GMU_OOB_GPU_SET_REQUEST 16
|
||||
#define GMU_OOB_GPU_SET_ACK 24
|
||||
#define GMU_OOB_GPU_SET_CLEAR 24
|
||||
|
||||
#define GMU_OOB_GPU_SET_REQUEST_NEW 30
|
||||
#define GMU_OOB_GPU_SET_ACK_NEW 31
|
||||
#define GMU_OOB_GPU_SET_CLEAR_NEW 31
|
||||
|
||||
|
||||
void a6xx_hfi_init(struct a6xx_gmu *gmu);
|
||||
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
|
||||
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
|
||||
|
@ -10,6 +10,7 @@
|
||||
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/devfreq.h>
|
||||
#include <linux/nvmem-consumer.h>
|
||||
#include <linux/soc/qcom/llcc-qcom.h>
|
||||
|
||||
#define GPU_PAS_ID 13
|
||||
@ -1117,7 +1118,7 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
|
||||
a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
|
||||
a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
|
||||
|
||||
if (IS_ERR(a6xx_gpu->llc_slice) && IS_ERR(a6xx_gpu->htw_llc_slice))
|
||||
if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
|
||||
a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
@ -1169,14 +1170,18 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
|
||||
{
|
||||
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
|
||||
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
|
||||
static DEFINE_MUTEX(perfcounter_oob);
|
||||
|
||||
mutex_lock(&perfcounter_oob);
|
||||
|
||||
/* Force the GPU power on so we can read this register */
|
||||
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
|
||||
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
|
||||
|
||||
*value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
|
||||
REG_A6XX_RBBM_PERFCTR_CP_0_HI);
|
||||
|
||||
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
|
||||
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
|
||||
mutex_unlock(&perfcounter_oob);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1208,6 +1213,10 @@ static void a6xx_destroy(struct msm_gpu *gpu)
|
||||
a6xx_gmu_remove(a6xx_gpu);
|
||||
|
||||
adreno_gpu_cleanup(adreno_gpu);
|
||||
|
||||
if (a6xx_gpu->opp_table)
|
||||
dev_pm_opp_put_supported_hw(a6xx_gpu->opp_table);
|
||||
|
||||
kfree(a6xx_gpu);
|
||||
}
|
||||
|
||||
@ -1239,6 +1248,50 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
|
||||
return (unsigned long)busy_time;
|
||||
}
|
||||
|
||||
static struct msm_gem_address_space *
|
||||
a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
|
||||
{
|
||||
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
|
||||
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
|
||||
struct iommu_domain *iommu;
|
||||
struct msm_mmu *mmu;
|
||||
struct msm_gem_address_space *aspace;
|
||||
u64 start, size;
|
||||
|
||||
iommu = iommu_domain_alloc(&platform_bus_type);
|
||||
if (!iommu)
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* This allows GPU to set the bus attributes required to use system
|
||||
* cache on behalf of the iommu page table walker.
|
||||
*/
|
||||
if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
|
||||
adreno_set_llc_attributes(iommu);
|
||||
|
||||
mmu = msm_iommu_new(&pdev->dev, iommu);
|
||||
if (IS_ERR(mmu)) {
|
||||
iommu_domain_free(iommu);
|
||||
return ERR_CAST(mmu);
|
||||
}
|
||||
|
||||
/*
|
||||
* Use the aperture start or SZ_16M, whichever is greater. This will
|
||||
* ensure that we align with the allocated pagetable range while still
|
||||
* allowing room in the lower 32 bits for GMEM and whatnot
|
||||
*/
|
||||
start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
|
||||
size = iommu->geometry.aperture_end - start + 1;
|
||||
|
||||
aspace = msm_gem_address_space_create(mmu, "gpu",
|
||||
start & GENMASK_ULL(48, 0), size);
|
||||
|
||||
if (IS_ERR(aspace) && !IS_ERR(mmu))
|
||||
mmu->funcs->destroy(mmu);
|
||||
|
||||
return aspace;
|
||||
}
|
||||
|
||||
static struct msm_gem_address_space *
|
||||
a6xx_create_private_address_space(struct msm_gpu *gpu)
|
||||
{
|
||||
@ -1264,6 +1317,78 @@ static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
|
||||
return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
|
||||
}
|
||||
|
||||
static u32 a618_get_speed_bin(u32 fuse)
|
||||
{
|
||||
if (fuse == 0)
|
||||
return 0;
|
||||
else if (fuse == 169)
|
||||
return 1;
|
||||
else if (fuse == 174)
|
||||
return 2;
|
||||
|
||||
return UINT_MAX;
|
||||
}
|
||||
|
||||
static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse)
|
||||
{
|
||||
u32 val = UINT_MAX;
|
||||
|
||||
if (revn == 618)
|
||||
val = a618_get_speed_bin(fuse);
|
||||
|
||||
if (val == UINT_MAX) {
|
||||
DRM_DEV_ERROR(dev,
|
||||
"missing support for speed-bin: %u. Some OPPs may not be supported by hardware",
|
||||
fuse);
|
||||
return UINT_MAX;
|
||||
}
|
||||
|
||||
return (1 << val);
|
||||
}
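/*
 * Worked example (hypothetical DT values, for illustration only): on an
 * a618 a fuse value of 169 maps to speed bin 1, so fuse_to_supp_hw()
 * returns 1 << 1 = 0x2.  dev_pm_opp_set_supported_hw(dev, &supp_hw, 1)
 * then keeps only the OPPs whose "opp-supported-hw" property has bit 1
 * set, e.g.:
 *
 *	opp-700000000 {
 *		opp-hz = /bits/ 64 <700000000>;
 *		opp-supported-hw = <0x2>;
 *	};
 *
 * An unrecognized fuse leaves supp_hw at UINT_MAX, which matches every
 * mask, so all OPPs stay usable instead of the GPU losing its table.
 */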
|
||||
|
||||
static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
|
||||
u32 revn)
|
||||
{
|
||||
struct opp_table *opp_table;
|
||||
struct nvmem_cell *cell;
|
||||
u32 supp_hw = UINT_MAX;
|
||||
void *buf;
|
||||
|
||||
cell = nvmem_cell_get(dev, "speed_bin");
|
||||
/*
|
||||
* -ENOENT means that the platform doesn't support speedbin which is
|
||||
* fine
|
||||
*/
|
||||
if (PTR_ERR(cell) == -ENOENT)
|
||||
return 0;
|
||||
else if (IS_ERR(cell)) {
|
||||
DRM_DEV_ERROR(dev,
|
||||
"failed to read speed-bin. Some OPPs may not be supported by hardware");
|
||||
goto done;
|
||||
}
|
||||
|
||||
buf = nvmem_cell_read(cell, NULL);
|
||||
if (IS_ERR(buf)) {
|
||||
nvmem_cell_put(cell);
|
||||
DRM_DEV_ERROR(dev,
|
||||
"failed to read speed-bin. Some OPPs may not be supported by hardware");
|
||||
goto done;
|
||||
}
|
||||
|
||||
supp_hw = fuse_to_supp_hw(dev, revn, *((u32 *) buf));
|
||||
|
||||
kfree(buf);
|
||||
nvmem_cell_put(cell);
|
||||
|
||||
done:
|
||||
opp_table = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1);
|
||||
if (IS_ERR(opp_table))
|
||||
return PTR_ERR(opp_table);
|
||||
|
||||
a6xx_gpu->opp_table = opp_table;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct adreno_gpu_funcs funcs = {
|
||||
.base = {
|
||||
.get_param = adreno_get_param,
|
||||
@ -1285,7 +1410,7 @@ static const struct adreno_gpu_funcs funcs = {
|
||||
.gpu_state_get = a6xx_gpu_state_get,
|
||||
.gpu_state_put = a6xx_gpu_state_put,
|
||||
#endif
|
||||
.create_address_space = adreno_iommu_create_address_space,
|
||||
.create_address_space = a6xx_create_address_space,
|
||||
.create_private_address_space = a6xx_create_private_address_space,
|
||||
.get_rptr = a6xx_get_rptr,
|
||||
},
|
||||
@ -1325,6 +1450,12 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
|
||||
|
||||
a6xx_llc_slices_init(pdev, a6xx_gpu);
|
||||
|
||||
ret = a6xx_set_supported_hw(&pdev->dev, a6xx_gpu, info->revn);
|
||||
if (ret) {
|
||||
a6xx_destroy(&(a6xx_gpu->base.base));
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
|
||||
if (ret) {
|
||||
a6xx_destroy(&(a6xx_gpu->base.base));
|
||||
|
@ -33,6 +33,8 @@ struct a6xx_gpu {
|
||||
void *llc_slice;
|
||||
void *htw_llc_slice;
|
||||
bool have_mmu500;
|
||||
|
||||
struct opp_table *opp_table;
|
||||
};
|
||||
|
||||
#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
|
||||
|
@ -133,6 +133,41 @@ static const struct adreno_info gpulist[] = {
|
||||
.gmem = (SZ_1M + SZ_512K),
|
||||
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
|
||||
.init = a4xx_gpu_init,
|
||||
}, {
|
||||
.rev = ADRENO_REV(5, 0, 8, ANY_ID),
|
||||
.revn = 508,
|
||||
.name = "A508",
|
||||
.fw = {
|
||||
[ADRENO_FW_PM4] = "a530_pm4.fw",
|
||||
[ADRENO_FW_PFP] = "a530_pfp.fw",
|
||||
},
|
||||
.gmem = (SZ_128K + SZ_8K),
|
||||
/*
|
||||
* Increase inactive period to 250 to avoid bouncing
|
||||
* the GDSC which appears to make it grumpy
|
||||
*/
|
||||
.inactive_period = 250,
|
||||
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
|
||||
.init = a5xx_gpu_init,
|
||||
.zapfw = "a508_zap.mdt",
|
||||
}, {
|
||||
.rev = ADRENO_REV(5, 0, 9, ANY_ID),
|
||||
.revn = 509,
|
||||
.name = "A509",
|
||||
.fw = {
|
||||
[ADRENO_FW_PM4] = "a530_pm4.fw",
|
||||
[ADRENO_FW_PFP] = "a530_pfp.fw",
|
||||
},
|
||||
.gmem = (SZ_256K + SZ_16K),
|
||||
/*
|
||||
* Increase inactive period to 250 to avoid bouncing
|
||||
* the GDSC which appears to make it grumpy
|
||||
*/
|
||||
.inactive_period = 250,
|
||||
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
|
||||
.init = a5xx_gpu_init,
|
||||
/* Adreno 509 uses the same ZAP as 512 */
|
||||
.zapfw = "a512_zap.mdt",
|
||||
}, {
|
||||
.rev = ADRENO_REV(5, 1, 0, ANY_ID),
|
||||
.revn = 510,
|
||||
@ -148,6 +183,23 @@ static const struct adreno_info gpulist[] = {
|
||||
*/
|
||||
.inactive_period = 250,
|
||||
.init = a5xx_gpu_init,
|
||||
}, {
|
||||
.rev = ADRENO_REV(5, 1, 2, ANY_ID),
|
||||
.revn = 512,
|
||||
.name = "A512",
|
||||
.fw = {
|
||||
[ADRENO_FW_PM4] = "a530_pm4.fw",
|
||||
[ADRENO_FW_PFP] = "a530_pfp.fw",
|
||||
},
|
||||
.gmem = (SZ_256K + SZ_16K),
|
||||
/*
|
||||
* Increase inactive period to 250 to avoid bouncing
|
||||
* the GDSC which appears to make it grumpy
|
||||
*/
|
||||
.inactive_period = 250,
|
||||
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
|
||||
.init = a5xx_gpu_init,
|
||||
.zapfw = "a512_zap.mdt",
|
||||
}, {
|
||||
.rev = ADRENO_REV(5, 3, 0, 2),
|
||||
.revn = 530,
|
||||
@ -168,7 +220,7 @@ static const struct adreno_info gpulist[] = {
|
||||
.init = a5xx_gpu_init,
|
||||
.zapfw = "a530_zap.mdt",
|
||||
}, {
|
||||
.rev = ADRENO_REV(5, 4, 0, 2),
|
||||
.rev = ADRENO_REV(5, 4, 0, ANY_ID),
|
||||
.revn = 540,
|
||||
.name = "A540",
|
||||
.fw = {
|
||||
|
@ -186,11 +186,18 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
|
||||
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
|
||||
}
|
||||
|
||||
void adreno_set_llc_attributes(struct iommu_domain *iommu)
|
||||
{
|
||||
struct io_pgtable_domain_attr pgtbl_cfg;
|
||||
|
||||
pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
|
||||
iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
|
||||
}
|
||||
|
||||
struct msm_gem_address_space *
|
||||
adreno_iommu_create_address_space(struct msm_gpu *gpu,
|
||||
struct platform_device *pdev)
|
||||
{
|
||||
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
|
||||
struct iommu_domain *iommu;
|
||||
struct msm_mmu *mmu;
|
||||
struct msm_gem_address_space *aspace;
|
||||
@ -200,20 +207,6 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
|
||||
if (!iommu)
|
||||
return NULL;
|
||||
|
||||
|
||||
if (adreno_is_a6xx(adreno_gpu)) {
|
||||
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
|
||||
struct io_pgtable_domain_attr pgtbl_cfg;
|
||||
/*
|
||||
* This allows GPU to set the bus attributes required to use system
|
||||
* cache on behalf of the iommu page table walker.
|
||||
*/
|
||||
if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
|
||||
pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
|
||||
iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
|
||||
}
|
||||
}
|
||||
|
||||
mmu = msm_iommu_new(&pdev->dev, iommu);
|
||||
if (IS_ERR(mmu)) {
|
||||
iommu_domain_free(iommu);
|
||||
|
@ -197,11 +197,26 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu)
|
||||
return gpu->revn == 430;
|
||||
}
|
||||
|
||||
static inline int adreno_is_a508(struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 508;
|
||||
}
|
||||
|
||||
static inline int adreno_is_a509(struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 509;
|
||||
}
|
||||
|
||||
static inline int adreno_is_a510(struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 510;
|
||||
}
|
||||
|
||||
static inline int adreno_is_a512(struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 512;
|
||||
}
|
||||
|
||||
static inline int adreno_is_a530(struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 530;
|
||||
@ -212,11 +227,6 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu)
|
||||
return gpu->revn == 540;
|
||||
}
|
||||
|
||||
static inline bool adreno_is_a6xx(struct adreno_gpu *gpu)
|
||||
{
|
||||
return ((gpu->revn < 700 && gpu->revn > 599));
|
||||
}
|
||||
|
||||
static inline int adreno_is_a618(struct adreno_gpu *gpu)
|
||||
{
|
||||
return gpu->revn == 618;
|
||||
@ -278,6 +288,8 @@ struct msm_gem_address_space *
|
||||
adreno_iommu_create_address_space(struct msm_gpu *gpu,
|
||||
struct platform_device *pdev);
|
||||
|
||||
void adreno_set_llc_attributes(struct iommu_domain *iommu);
|
||||
|
||||
/*
|
||||
* For a5xx and a6xx targets load the zap shader that is used to pull the GPU
|
||||
* out of secure mode
|
||||
|
@ -4,8 +4,10 @@
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
|
||||
#include <linux/delay.h>
|
||||
#include "dpu_encoder_phys.h"
|
||||
#include "dpu_hw_interrupts.h"
|
||||
#include "dpu_hw_pingpong.h"
|
||||
#include "dpu_core_irq.h"
|
||||
#include "dpu_formats.h"
|
||||
#include "dpu_trace.h"
|
||||
@ -35,6 +37,8 @@
|
||||
|
||||
#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
|
||||
|
||||
#define DPU_ENC_MAX_POLL_TIMEOUT_US 2000
|
||||
|
||||
static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
|
||||
{
|
||||
return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false;
|
||||
@ -368,15 +372,12 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
|
||||
tc_cfg.vsync_count = vsync_hz /
|
||||
(mode->vtotal * drm_mode_vrefresh(mode));
|
||||
|
||||
/* enable external TE after kickoff to avoid premature autorefresh */
|
||||
tc_cfg.hw_vsync_mode = 0;
|
||||
|
||||
/*
|
||||
* By setting sync_cfg_height to near max register value, we essentially
|
||||
* disable dpu hw generated TE signal, since hw TE will arrive first.
|
||||
* Only caveat is if due to error, we hit wrap-around.
|
||||
* Set the sync_cfg_height to twice vtotal so that if we lose a
|
||||
* TE event coming from the display TE pin we won't stall immediately
|
||||
*/
|
||||
tc_cfg.sync_cfg_height = 0xFFF0;
|
||||
tc_cfg.hw_vsync_mode = 1;
|
||||
tc_cfg.sync_cfg_height = mode->vtotal * 2;
|
||||
tc_cfg.vsync_init_val = mode->vdisplay;
|
||||
tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
|
||||
tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
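/*
 * Rough numbers for the tearcheck programming above (hypothetical
 * panel, for illustration only): with a 19.2 MHz vsync counter clock
 * and a 1080x2160 command-mode panel (vtotal = 2208) at 60 Hz:
 *
 *	tc_cfg.vsync_count     = 19200000 / (2208 * 60)  ~= 144 ticks/line
 *	tc_cfg.sync_cfg_height = 2208 * 2 = 4416, so one missed TE pulse
 *	                         from the panel only delays the frame
 *	                         instead of stalling the pipe
 *	tc_cfg.vsync_init_val  = 2160 (vdisplay)
 */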
|
||||
@ -580,6 +581,69 @@ static void dpu_encoder_phys_cmd_prepare_for_kickoff(
|
||||
atomic_read(&phys_enc->pending_kickoff_cnt));
|
||||
}
|
||||
|
||||
static bool dpu_encoder_phys_cmd_is_ongoing_pptx(
|
||||
struct dpu_encoder_phys *phys_enc)
|
||||
{
|
||||
struct dpu_hw_pp_vsync_info info;
|
||||
|
||||
if (!phys_enc)
|
||||
return false;
|
||||
|
||||
phys_enc->hw_pp->ops.get_vsync_info(phys_enc->hw_pp, &info);
|
||||
if (info.wr_ptr_line_count > 0 &&
|
||||
info.wr_ptr_line_count < phys_enc->cached_mode.vdisplay)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void dpu_encoder_phys_cmd_prepare_commit(
|
||||
struct dpu_encoder_phys *phys_enc)
|
||||
{
|
||||
struct dpu_encoder_phys_cmd *cmd_enc =
|
||||
to_dpu_encoder_phys_cmd(phys_enc);
|
||||
int trial = 0;
|
||||
|
||||
if (!phys_enc)
|
||||
return;
|
||||
if (!phys_enc->hw_pp)
|
||||
return;
|
||||
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
|
||||
return;
|
||||
|
||||
/* If autorefresh is already disabled, we have nothing to do */
|
||||
if (!phys_enc->hw_pp->ops.get_autorefresh(phys_enc->hw_pp, NULL))
|
||||
return;
|
||||
|
||||
/*
|
||||
* If autorefresh is enabled, disable it and make sure it is safe to
|
||||
* proceed with current frame commit/push. The sequence followed is:
* 1. Disable TE
* 2. Disable autorefresh config
* 3. Poll for frame transfer ongoing to be false
* 4. Enable TE back
|
||||
*/
|
||||
_dpu_encoder_phys_cmd_connect_te(phys_enc, false);
|
||||
phys_enc->hw_pp->ops.setup_autorefresh(phys_enc->hw_pp, 0, false);
|
||||
|
||||
do {
|
||||
udelay(DPU_ENC_MAX_POLL_TIMEOUT_US);
|
||||
if ((trial * DPU_ENC_MAX_POLL_TIMEOUT_US)
|
||||
> (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) {
|
||||
DPU_ERROR_CMDENC(cmd_enc,
|
||||
"disable autorefresh failed\n");
|
||||
break;
|
||||
}
|
||||
|
||||
trial++;
|
||||
} while (dpu_encoder_phys_cmd_is_ongoing_pptx(phys_enc));
|
||||
|
||||
_dpu_encoder_phys_cmd_connect_te(phys_enc, true);
|
||||
|
||||
DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc),
|
||||
"disabled autorefresh\n");
|
||||
}
|
||||
|
||||
static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
|
||||
struct dpu_encoder_phys *phys_enc)
|
||||
{
|
||||
@ -621,20 +685,15 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete(
|
||||
static int dpu_encoder_phys_cmd_wait_for_commit_done(
|
||||
struct dpu_encoder_phys *phys_enc)
|
||||
{
|
||||
int rc = 0;
|
||||
struct dpu_encoder_phys_cmd *cmd_enc;
|
||||
|
||||
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
|
||||
|
||||
/* only required for master controller */
|
||||
if (dpu_encoder_phys_cmd_is_master(phys_enc))
|
||||
rc = _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
|
||||
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
|
||||
return 0;
|
||||
|
||||
/* required for both controllers */
|
||||
if (!rc && cmd_enc->serialize_wait4pp)
|
||||
dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc);
|
||||
|
||||
return rc;
|
||||
return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
|
||||
}
|
||||
|
||||
static int dpu_encoder_phys_cmd_wait_for_vblank(
|
||||
@ -681,6 +740,7 @@ static void dpu_encoder_phys_cmd_trigger_start(
|
||||
static void dpu_encoder_phys_cmd_init_ops(
|
||||
struct dpu_encoder_phys_ops *ops)
|
||||
{
|
||||
ops->prepare_commit = dpu_encoder_phys_cmd_prepare_commit;
|
||||
ops->is_master = dpu_encoder_phys_cmd_is_master;
|
||||
ops->mode_set = dpu_encoder_phys_cmd_mode_set;
|
||||
ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup;
|
||||
|
@ -12,14 +12,17 @@
|
||||
|
||||
#define VIG_MASK \
|
||||
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
|
||||
BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
|
||||
BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) |\
|
||||
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
|
||||
|
||||
#define VIG_SDM845_MASK \
|
||||
(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
|
||||
(VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3))
|
||||
|
||||
#define VIG_SC7180_MASK \
|
||||
(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED4))
|
||||
(VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
|
||||
|
||||
#define VIG_SM8250_MASK \
|
||||
(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3LITE))
|
||||
|
||||
#define DMA_SDM845_MASK \
|
||||
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
|
||||
@ -185,7 +188,7 @@ static const struct dpu_caps sm8150_dpu_caps = {
|
||||
static const struct dpu_caps sm8250_dpu_caps = {
|
||||
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
|
||||
.max_mixer_blendstages = 0xb,
|
||||
.qseed_type = DPU_SSPP_SCALER_QSEED3, /* TODO: qseed3 lite */
|
||||
.qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
|
||||
.smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
|
||||
.ubwc_version = DPU_HW_UBWC_VER_40,
|
||||
.has_src_split = true,
|
||||
@ -444,6 +447,34 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
|
||||
sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
|
||||
};
|
||||
|
||||
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
|
||||
_VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE);
|
||||
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
|
||||
_VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE);
|
||||
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
|
||||
_VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE);
|
||||
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
|
||||
_VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE);
|
||||
|
||||
static const struct dpu_sspp_cfg sm8250_sspp[] = {
|
||||
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK,
|
||||
sm8250_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
|
||||
SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SM8250_MASK,
|
||||
sm8250_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
|
||||
SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SM8250_MASK,
|
||||
sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
|
||||
SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SM8250_MASK,
|
||||
sm8250_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
|
||||
SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
|
||||
sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
|
||||
SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
|
||||
sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
|
||||
SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
|
||||
sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
|
||||
SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK,
|
||||
sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
|
||||
};
|
||||
|
||||
/*************************************************************
|
||||
* MIXER sub blocks config
|
||||
*************************************************************/
|
||||
@ -532,23 +563,28 @@ static const struct dpu_dspp_sub_blks sm8150_dspp_sblk = {
|
||||
.len = 0x90, .version = 0x40000},
|
||||
};
|
||||
|
||||
#define DSPP_BLK(_name, _id, _base, _sblk) \
|
||||
#define DSPP_BLK(_name, _id, _base, _mask, _sblk) \
|
||||
{\
|
||||
.name = _name, .id = _id, \
|
||||
.base = _base, .len = 0x1800, \
|
||||
.features = DSPP_SC7180_MASK, \
|
||||
.features = _mask, \
|
||||
.sblk = _sblk \
|
||||
}
|
||||
|
||||
static const struct dpu_dspp_cfg sc7180_dspp[] = {
|
||||
DSPP_BLK("dspp_0", DSPP_0, 0x54000, &sc7180_dspp_sblk),
|
||||
DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
|
||||
&sc7180_dspp_sblk),
|
||||
};
|
||||
|
||||
static const struct dpu_dspp_cfg sm8150_dspp[] = {
|
||||
DSPP_BLK("dspp_0", DSPP_0, 0x54000, &sm8150_dspp_sblk),
|
||||
DSPP_BLK("dspp_1", DSPP_1, 0x56000, &sm8150_dspp_sblk),
|
||||
DSPP_BLK("dspp_2", DSPP_2, 0x58000, &sm8150_dspp_sblk),
|
||||
DSPP_BLK("dspp_3", DSPP_3, 0x5a000, &sm8150_dspp_sblk),
|
||||
DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
|
||||
&sm8150_dspp_sblk),
|
||||
DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
|
||||
&sm8150_dspp_sblk),
|
||||
DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
|
||||
&sm8150_dspp_sblk),
|
||||
DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
|
||||
&sm8150_dspp_sblk),
|
||||
};
|
||||
|
||||
/*************************************************************
|
||||
@ -624,33 +660,33 @@ static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = {
|
||||
/*************************************************************
|
||||
* INTF sub blocks config
|
||||
*************************************************************/
|
||||
#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _features) \
|
||||
#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _progfetch, _features) \
|
||||
{\
|
||||
.name = _name, .id = _id, \
|
||||
.base = _base, .len = 0x280, \
|
||||
.features = _features, \
|
||||
.type = _type, \
|
||||
.controller_id = _ctrl_id, \
|
||||
.prog_fetch_lines_worst_case = 24 \
|
||||
.prog_fetch_lines_worst_case = _progfetch \
|
||||
}
|
||||
|
||||
static const struct dpu_intf_cfg sdm845_intf[] = {
|
||||
INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SDM845_MASK),
|
||||
INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SDM845_MASK),
|
||||
INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, INTF_SDM845_MASK),
|
||||
INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, INTF_SDM845_MASK),
|
||||
INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SDM845_MASK),
|
||||
INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SDM845_MASK),
|
||||
INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SDM845_MASK),
|
||||
INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SDM845_MASK),
|
||||
};
|
||||
|
||||
static const struct dpu_intf_cfg sc7180_intf[] = {
|
||||
INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SC7180_MASK),
|
||||
INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SC7180_MASK),
|
||||
INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK),
|
||||
INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK),
|
||||
};
|
||||
|
||||
static const struct dpu_intf_cfg sm8150_intf[] = {
|
||||
INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SC7180_MASK),
|
||||
INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SC7180_MASK),
|
||||
INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, INTF_SC7180_MASK),
|
||||
INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, INTF_SC7180_MASK),
|
||||
INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK),
|
||||
INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK),
|
||||
INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SC7180_MASK),
|
||||
INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SC7180_MASK),
|
||||
};
|
||||
|
||||
/*************************************************************
|
||||
@ -969,9 +1005,8 @@ static void sm8250_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
|
||||
.mdp = sm8250_mdp,
|
||||
.ctl_count = ARRAY_SIZE(sm8150_ctl),
|
||||
.ctl = sm8150_ctl,
|
||||
/* TODO: sspp qseed version differs from 845 */
|
||||
.sspp_count = ARRAY_SIZE(sdm845_sspp),
|
||||
.sspp = sdm845_sspp,
|
||||
.sspp_count = ARRAY_SIZE(sm8250_sspp),
|
||||
.sspp = sm8250_sspp,
|
||||
.mixer_count = ARRAY_SIZE(sm8150_lm),
|
||||
.mixer = sm8150_lm,
|
||||
.dspp_count = ARRAY_SIZE(sm8150_dspp),
|
||||
|
@ -95,6 +95,7 @@ enum {
|
||||
* @DPU_SSPP_SRC Src and fetch part of the pipes,
|
||||
* @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
|
||||
* @DPU_SSPP_SCALER_QSEED3, QSEED3 algorithm support
* @DPU_SSPP_SCALER_QSEED3LITE, QSEED3 Lite algorithm support
* @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support
* @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
* @DPU_SSPP_CSC, Support of Color space conversion
|
||||
@ -114,6 +115,7 @@ enum {
|
||||
DPU_SSPP_SRC = 0x1,
|
||||
DPU_SSPP_SCALER_QSEED2,
|
||||
DPU_SSPP_SCALER_QSEED3,
|
||||
DPU_SSPP_SCALER_QSEED3LITE,
|
||||
DPU_SSPP_SCALER_QSEED4,
|
||||
DPU_SSPP_SCALER_RGB,
|
||||
DPU_SSPP_CSC,
|
||||
|
@ -23,6 +23,7 @@
|
||||
#define PP_WR_PTR_IRQ 0x024
|
||||
#define PP_OUT_LINE_COUNT 0x028
|
||||
#define PP_LINE_COUNT 0x02C
|
||||
#define PP_AUTOREFRESH_CONFIG 0x030
|
||||
|
||||
#define PP_FBC_MODE 0x034
|
||||
#define PP_FBC_BUDGET_CTL 0x038
|
||||
@ -120,6 +121,29 @@ static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dpu_hw_pp_setup_autorefresh_config(struct dpu_hw_pingpong *pp,
|
||||
u32 frame_count, bool enable)
|
||||
{
|
||||
DPU_REG_WRITE(&pp->hw, PP_AUTOREFRESH_CONFIG,
|
||||
enable ? (BIT(31) | frame_count) : 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* dpu_hw_pp_get_autorefresh_config - Get autorefresh config from HW
|
||||
* @pp: DPU pingpong structure
|
||||
* @frame_count: Used to return the current frame count from hw
|
||||
*
|
||||
* Returns: True if autorefresh enabled, false if disabled.
|
||||
*/
|
||||
static bool dpu_hw_pp_get_autorefresh_config(struct dpu_hw_pingpong *pp,
|
||||
u32 *frame_count)
|
||||
{
|
||||
u32 val = DPU_REG_READ(&pp->hw, PP_AUTOREFRESH_CONFIG);
|
||||
if (frame_count != NULL)
|
||||
*frame_count = val & 0xffff;
|
||||
return !!((val & BIT(31)) >> 31);
|
||||
}
|
||||
|
||||
static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp,
|
||||
u32 timeout_us)
|
||||
{
|
||||
@ -228,6 +252,8 @@ static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
|
||||
c->ops.enable_tearcheck = dpu_hw_pp_enable_te;
|
||||
c->ops.connect_external_te = dpu_hw_pp_connect_external_te;
|
||||
c->ops.get_vsync_info = dpu_hw_pp_get_vsync_info;
|
||||
c->ops.setup_autorefresh = dpu_hw_pp_setup_autorefresh_config;
|
||||
c->ops.get_autorefresh = dpu_hw_pp_get_autorefresh_config;
|
||||
c->ops.poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
|
||||
c->ops.get_line_count = dpu_hw_pp_get_line_count;
|
||||
|
||||
|
@ -63,6 +63,8 @@ struct dpu_hw_dither_cfg {
|
||||
* @setup_tearcheck : program tear check values
|
||||
* @enable_tearcheck : enables tear check
|
||||
* @get_vsync_info : retrieves timing info of the panel
|
||||
* @setup_autorefresh : configure and enable the autorefresh config
|
||||
* @get_autorefresh : retrieve autorefresh config from hardware
|
||||
* @setup_dither : function to program the dither hw block
|
||||
* @get_line_count: obtain current vertical line counter
|
||||
*/
|
||||
@ -94,6 +96,18 @@ struct dpu_hw_pingpong_ops {
|
||||
int (*get_vsync_info)(struct dpu_hw_pingpong *pp,
|
||||
struct dpu_hw_pp_vsync_info *info);
|
||||
|
||||
/**
|
||||
* configure and enable the autorefresh config
|
||||
*/
|
||||
void (*setup_autorefresh)(struct dpu_hw_pingpong *pp,
|
||||
u32 frame_count, bool enable);
|
||||
|
||||
/**
|
||||
* retrieve autorefresh config from hardware
|
||||
*/
|
||||
bool (*get_autorefresh)(struct dpu_hw_pingpong *pp,
|
||||
u32 *frame_count);
|
||||
|
||||
/**
|
||||
* poll until write pointer transmission starts
|
||||
* @Return: 0 on success, -ETIMEDOUT on timeout
|
||||
|
@ -673,6 +673,7 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
|
||||
c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
|
||||
|
||||
if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) ||
|
||||
test_bit(DPU_SSPP_SCALER_QSEED3LITE, &features) ||
|
||||
test_bit(DPU_SSPP_SCALER_QSEED4, &features)) {
|
||||
c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
|
||||
c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
|
||||
|
@ -28,6 +28,7 @@ struct dpu_hw_pipe;
|
||||
#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
|
||||
(1UL << DPU_SSPP_SCALER_QSEED2) | \
|
||||
(1UL << DPU_SSPP_SCALER_QSEED3) | \
|
||||
(1UL << DPU_SSPP_SCALER_QSEED3LITE) | \
|
||||
(1UL << DPU_SSPP_SCALER_QSEED4))
|
||||
|
||||
/**
|
||||
|
@ -59,6 +59,19 @@ static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
|
||||
#define QSEED3_SEP_LUT_SIZE \
|
||||
(QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
|
||||
|
||||
/* DPU_SCALER_QSEED3LITE */
|
||||
#define QSEED3LITE_COEF_LUT_Y_SEP_BIT 4
|
||||
#define QSEED3LITE_COEF_LUT_UV_SEP_BIT 5
|
||||
#define QSEED3LITE_COEF_LUT_CTRL 0x4C
|
||||
#define QSEED3LITE_COEF_LUT_SWAP_BIT 0
|
||||
#define QSEED3LITE_DIR_FILTER_WEIGHT 0x60
|
||||
#define QSEED3LITE_FILTERS 2
|
||||
#define QSEED3LITE_SEPARABLE_LUTS 10
|
||||
#define QSEED3LITE_LUT_SIZE 33
|
||||
#define QSEED3LITE_SEP_LUT_SIZE \
|
||||
(QSEED3LITE_LUT_SIZE * QSEED3LITE_SEPARABLE_LUTS * sizeof(u32))
|
||||
|
||||
|
||||
void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
|
||||
u32 reg_off,
|
||||
u32 val,
|
||||
@ -156,6 +169,57 @@ static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c,
|
||||
|
||||
}
|
||||
|
||||
static void _dpu_hw_setup_scaler3lite_lut(struct dpu_hw_blk_reg_map *c,
|
||||
struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
|
||||
{
|
||||
int j, filter;
|
||||
int config_lut = 0x0;
|
||||
unsigned long lut_flags;
|
||||
u32 lut_addr, lut_offset;
|
||||
u32 *lut[QSEED3LITE_FILTERS] = {NULL, NULL};
|
||||
static const uint32_t off_tbl[QSEED3_FILTERS] = { 0x000, 0x200 };
|
||||
|
||||
DPU_REG_WRITE(c, QSEED3LITE_DIR_FILTER_WEIGHT + offset, scaler3_cfg->dir_weight);
|
||||
|
||||
if (!scaler3_cfg->sep_lut)
|
||||
return;
|
||||
|
||||
lut_flags = (unsigned long) scaler3_cfg->lut_flag;
|
||||
if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
|
||||
(scaler3_cfg->y_rgb_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) &&
|
||||
(scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) {
|
||||
lut[0] = scaler3_cfg->sep_lut +
|
||||
scaler3_cfg->y_rgb_sep_lut_idx * QSEED3LITE_LUT_SIZE;
|
||||
config_lut = 1;
|
||||
}
|
||||
if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
|
||||
(scaler3_cfg->uv_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) &&
|
||||
(scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) {
|
||||
lut[1] = scaler3_cfg->sep_lut +
|
||||
scaler3_cfg->uv_sep_lut_idx * QSEED3LITE_LUT_SIZE;
|
||||
config_lut = 1;
|
||||
}
|
||||
|
||||
if (config_lut) {
|
||||
for (filter = 0; filter < QSEED3LITE_FILTERS; filter++) {
|
||||
if (!lut[filter])
|
||||
continue;
|
||||
lut_offset = 0;
|
||||
lut_addr = QSEED3_COEF_LUT + offset + off_tbl[filter];
|
||||
for (j = 0; j < QSEED3LITE_LUT_SIZE; j++) {
|
||||
DPU_REG_WRITE(c,
|
||||
lut_addr,
|
||||
(lut[filter])[lut_offset++]);
|
||||
lut_addr += 4;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
|
||||
DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
|
||||
|
||||
}
|
||||
|
||||
static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c,
|
||||
struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset)
|
||||
{
|
||||
@ -242,9 +306,12 @@ void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
|
||||
op_mode |= BIT(8);
|
||||
}
|
||||
|
||||
if (scaler3_cfg->lut_flag)
|
||||
_dpu_hw_setup_scaler3_lut(c, scaler3_cfg,
|
||||
scaler_offset);
|
||||
if (scaler3_cfg->lut_flag) {
|
||||
if (scaler_version < 0x2004)
|
||||
_dpu_hw_setup_scaler3_lut(c, scaler3_cfg, scaler_offset);
|
||||
else
|
||||
_dpu_hw_setup_scaler3lite_lut(c, scaler3_cfg, scaler_offset);
|
||||
}
|
||||
|
||||
if (scaler_version == 0x1002) {
|
||||
phase_init =
|
||||
|
@ -97,6 +97,7 @@ struct dpu_hw_scaler3_de_cfg {
|
||||
* @ cir_lut: pointer to circular filter LUT
|
||||
* @ sep_lut: pointer to separable filter LUT
|
||||
* @ de: detail enhancer configuration
|
||||
* @ dir_weight: Directional weight
|
||||
*/
|
||||
struct dpu_hw_scaler3_cfg {
|
||||
u32 enable;
|
||||
@ -137,6 +138,8 @@ struct dpu_hw_scaler3_cfg {
|
||||
* Detail enhancer settings
|
||||
*/
|
||||
struct dpu_hw_scaler3_de_cfg de;
|
||||
|
||||
u32 dir_weight;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -30,7 +30,7 @@
|
||||
#define VBIF_XIN_HALT_CTRL0 0x0200
|
||||
#define VBIF_XIN_HALT_CTRL1 0x0204
|
||||
#define VBIF_XINL_QOS_RP_REMAP_000 0x0550
|
||||
#define VBIF_XINL_QOS_LVL_REMAP_000 0x0590
|
||||
#define VBIF_XINL_QOS_LVL_REMAP_000(v) (v < DPU_HW_VER_400 ? 0x570 : 0x0590)
|
||||
|
||||
static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
|
||||
u32 *pnd_errors, u32 *src_errors)
|
||||
@ -156,18 +156,19 @@ static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
|
||||
u32 xin_id, u32 level, u32 remap_level)
|
||||
{
|
||||
struct dpu_hw_blk_reg_map *c;
|
||||
u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
|
||||
u32 reg_lvl, reg_val, reg_val_lvl, mask, reg_high, reg_shift;
|
||||
|
||||
if (!vbif)
|
||||
return;
|
||||
|
||||
c = &vbif->hw;
|
||||
|
||||
reg_lvl = VBIF_XINL_QOS_LVL_REMAP_000(c->hwversion);
|
||||
reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
|
||||
reg_shift = (xin_id & 0x7) * 4;
|
||||
|
||||
reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
|
||||
reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
|
||||
reg_val_lvl = DPU_REG_READ(c, reg_lvl + reg_high);
|
||||
|
||||
mask = 0x7 << reg_shift;
|
||||
|
||||
@ -178,7 +179,7 @@ static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
|
||||
reg_val_lvl |= (remap_level << reg_shift) & mask;
|
||||
|
||||
DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
|
||||
DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
|
||||
DPU_REG_WRITE(c, reg_lvl + reg_high, reg_val_lvl);
|
||||
}
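/*
 * Worked example for the remap addressing above (for illustration
 * only): with xin_id = 9 and level = 2,
 *
 *	reg_high  = ((9 & 0x8) >> 3) * 4 + (2 * 8) = 4 + 16 = 20
 *	reg_shift = (9 & 0x7) * 4                  = 4
 *
 * so the remap level lands in bits [7:4] of the register at
 * VBIF_XINL_QOS_LVL_REMAP_000(hwversion) + 20; the macro picks base
 * 0x570 before DPU 4.0 and 0x590 from 4.0 on.
 */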
|
||||
|
||||
static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
|
||||
|
@ -749,7 +749,7 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
|
||||
case DRM_MODE_ENCODER_TMDS:
|
||||
info.num_of_h_tiles = 1;
|
||||
break;
|
||||
};
|
||||
}
|
||||
|
||||
rc = dpu_encoder_setup(encoder->dev, encoder, &info);
|
||||
if (rc)
|
||||
|
@ -1465,6 +1465,7 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane)
|
||||
pdpu->debugfs_root, &pdpu->debugfs_src);
|
||||
|
||||
if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
|
||||
cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) ||
|
||||
cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
|
||||
cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) {
|
||||
dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
|
||||
|
@ -177,7 +177,7 @@ static const struct mdp5_cfg_hw msm8x74v2_config = {
|
||||
[3] = INTF_HDMI,
|
||||
},
|
||||
},
|
||||
.max_clk = 200000000,
|
||||
.max_clk = 320000000,
|
||||
};
|
||||
|
||||
static const struct mdp5_cfg_hw apq8084_config = {
|
||||
|
@ -1180,7 +1180,7 @@ static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
|
||||
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
|
||||
pp_done);
|
||||
|
||||
complete(&mdp5_crtc->pp_completion);
|
||||
complete_all(&mdp5_crtc->pp_completion);
|
||||
}
|
||||
|
||||
static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
|
||||
|
@ -336,7 +336,6 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
|
||||
ssize_t ret;
|
||||
int const aux_cmd_native_max = 16;
|
||||
int const aux_cmd_i2c_max = 128;
|
||||
int const retry_count = 5;
|
||||
struct dp_aux_private *aux = container_of(dp_aux,
|
||||
struct dp_aux_private, dp_aux);
|
||||
|
||||
@ -378,12 +377,6 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
|
||||
ret = dp_aux_cmd_fifo_tx(aux, msg);
|
||||
|
||||
if (ret < 0) {
|
||||
if (aux->native) {
|
||||
aux->retry_cnt++;
|
||||
if (!(aux->retry_cnt % retry_count))
|
||||
dp_catalog_aux_update_cfg(aux->catalog);
|
||||
dp_catalog_aux_reset(aux->catalog);
|
||||
}
|
||||
usleep_range(400, 500); /* at least 400us to next try */
|
||||
goto unlock_exit;
|
||||
}
|
||||
|
@ -190,6 +190,18 @@ int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_catalog_aux_reset() - reset AUX controller
|
||||
*
|
||||
* @dp_catalog: DP catalog structure
*
* return: void
*
* This function resets the AUX controller
*
* NOTE: resetting the AUX controller will also clear any pending HPD related interrupts
|
||||
*
|
||||
*/
|
||||
void dp_catalog_aux_reset(struct dp_catalog *dp_catalog)
|
||||
{
|
||||
u32 aux_ctrl;
|
||||
@ -483,6 +495,18 @@ int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dp_catalog_ctrl_reset() - reset DP controller
|
||||
*
|
||||
* @dp_catalog: DP catalog structure
|
||||
*
|
||||
* return: void
|
||||
*
|
||||
* This function resets the DP controller
*
* NOTE: resetting the DP controller will also clear any pending HPD related interrupts
|
||||
*
|
||||
*/
|
||||
void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog)
|
||||
{
|
||||
u32 sw_reset;
|
||||
|
@ -631,7 +631,7 @@ static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in,
|
||||
|
||||
tu = kzalloc(sizeof(*tu), GFP_KERNEL);
|
||||
if (!tu)
|
||||
return
|
||||
return;
|
||||
|
||||
dp_panel_update_tu_timings(in, tu);
|
||||
|
||||
@ -1158,7 +1158,7 @@ static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
};
|
||||
}
|
||||
|
||||
if (!ret)
|
||||
DRM_DEBUG_DP("new rate=0x%x\n", ctrl->link->link_params.rate);
|
||||
@ -1296,7 +1296,6 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
|
||||
* transitioned to PUSH_IDLE. In order to start transmitting
|
||||
* a link training pattern, we have to first do soft reset.
|
||||
*/
|
||||
dp_catalog_ctrl_reset(ctrl->catalog);
|
||||
|
||||
ret = dp_ctrl_link_train(ctrl, cr, training_step);
|
||||
|
||||
@ -1365,7 +1364,7 @@ static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl)
|
||||
return ret;
|
||||
}
|
||||
|
||||
int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
|
||||
int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset)
|
||||
{
|
||||
struct dp_ctrl_private *ctrl;
|
||||
struct dp_io *dp_io;
|
||||
@ -1382,6 +1381,9 @@ int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
|
||||
|
||||
ctrl->dp_ctrl.orientation = flip;
|
||||
|
||||
if (reset)
|
||||
dp_catalog_ctrl_reset(ctrl->catalog);
|
||||
|
||||
dp_catalog_ctrl_phy_reset(ctrl->catalog);
|
||||
phy_init(phy);
|
||||
dp_catalog_ctrl_enable_irq(ctrl->catalog, true);
|
||||
@ -1496,7 +1498,6 @@ static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
|
||||
int training_step = DP_TRAINING_NONE;
|
||||
|
||||
dp_ctrl_push_idle(&ctrl->dp_ctrl);
|
||||
dp_catalog_ctrl_reset(ctrl->catalog);
|
||||
|
||||
ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
|
||||
|
||||
@ -1785,14 +1786,14 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
|
||||
* Set up transfer unit values and set controller state to send
|
||||
* video.
|
||||
*/
|
||||
reinit_completion(&ctrl->video_comp);
|
||||
|
||||
dp_ctrl_configure_source_params(ctrl);
|
||||
|
||||
dp_catalog_ctrl_config_msa(ctrl->catalog,
|
||||
ctrl->link->link_params.rate,
|
||||
ctrl->dp_ctrl.pixel_rate, dp_ctrl_use_fixed_nvid(ctrl));
|
||||
|
||||
reinit_completion(&ctrl->video_comp);
|
||||
|
||||
dp_ctrl_setup_tr_unit(ctrl);
|
||||
|
||||
dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
|
||||
|
@ -19,7 +19,7 @@ struct dp_ctrl {
|
||||
u32 pixel_rate;
|
||||
};
|
||||
|
||||
int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip);
|
||||
int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset);
|
||||
void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl);
|
||||
int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
|
||||
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
|
||||
|
@ -350,7 +350,7 @@ end:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void dp_display_host_init(struct dp_display_private *dp)
|
||||
static void dp_display_host_init(struct dp_display_private *dp, int reset)
|
||||
{
|
||||
bool flip = false;
|
||||
|
||||
@ -365,7 +365,7 @@ static void dp_display_host_init(struct dp_display_private *dp)
|
||||
dp_display_set_encoder_mode(dp);
|
||||
|
||||
dp_power_init(dp->power, flip);
|
||||
dp_ctrl_host_init(dp->ctrl, flip);
|
||||
dp_ctrl_host_init(dp->ctrl, flip, reset);
|
||||
dp_aux_init(dp->aux);
|
||||
dp->core_initialized = true;
|
||||
}
|
||||
@ -403,7 +403,7 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
|
||||
goto end;
|
||||
}
|
||||
|
||||
dp_display_host_init(dp);
|
||||
dp_display_host_init(dp, false);
|
||||
|
||||
/*
|
||||
* set sink to normal operation mode -- D0
|
||||
@ -651,8 +651,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
|
||||
dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
|
||||
|
||||
/* signal the disconnect event early to ensure proper teardown */
|
||||
dp_display_handle_plugged_change(g_dp_display, false);
|
||||
reinit_completion(&dp->audio_comp);
|
||||
dp_display_handle_plugged_change(g_dp_display, false);
|
||||
|
||||
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
|
||||
DP_DP_IRQ_HPD_INT_MASK, true);
|
||||
@ -700,6 +700,13 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (state == ST_CONNECT_PENDING || state == ST_DISCONNECT_PENDING) {
|
||||
/* wait until ST_CONNECTED */
|
||||
dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
|
||||
mutex_unlock(&dp->event_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
|
||||
if (ret == -ECONNRESET) { /* cable unplugged */
|
||||
dp->core_initialized = false;
|
||||
@ -890,6 +897,9 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
|
||||
|
||||
/* wait only if audio was enabled */
|
||||
if (dp_display->audio_enabled) {
|
||||
/* signal the disconnect event */
|
||||
reinit_completion(&dp->audio_comp);
|
||||
dp_display_handle_plugged_change(dp_display, false);
|
||||
if (!wait_for_completion_timeout(&dp->audio_comp,
|
||||
HZ * 5))
|
||||
DRM_ERROR("audio comp timeout\n");
|
||||
@ -1002,7 +1012,7 @@ int dp_display_get_test_bpp(struct msm_dp *dp)
|
||||
static void dp_display_config_hpd(struct dp_display_private *dp)
|
||||
{
|
||||
|
||||
dp_display_host_init(dp);
|
||||
dp_display_host_init(dp, true);
|
||||
dp_catalog_ctrl_hpd_config(dp->catalog);
|
||||
|
||||
/* Enable interrupt first time
|
||||
@ -1256,7 +1266,7 @@ static int dp_pm_resume(struct device *dev)
|
||||
dp->hpd_state = ST_DISCONNECTED;
|
||||
|
||||
/* turn on dp ctrl/phy */
|
||||
dp_display_host_init(dp);
|
||||
dp_display_host_init(dp, true);
|
||||
|
||||
dp_catalog_ctrl_hpd_config(dp->catalog);
|
||||
|
||||
@ -1439,7 +1449,7 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
|
||||
state = dp_display->hpd_state;
|
||||
|
||||
if (state == ST_DISPLAY_OFF)
|
||||
dp_display_host_init(dp_display);
|
||||
dp_display_host_init(dp_display, true);
|
||||
|
||||
dp_display_enable(dp_display, 0);
|
||||
|
||||
|
@ -409,7 +409,6 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel)
|
||||
|
||||
int dp_panel_init_panel_info(struct dp_panel *dp_panel)
|
||||
{
|
||||
int rc = 0;
|
||||
struct drm_display_mode *drm_mode;
|
||||
|
||||
drm_mode = &dp_panel->dp_mode.drm_mode;
|
||||
@ -436,7 +435,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel)
|
||||
min_t(u32, dp_panel->dp_mode.bpp, 30));
|
||||
DRM_DEBUG_DP("updated bpp = %d\n", dp_panel->dp_mode.bpp);
|
||||
|
||||
return rc;
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct dp_panel *dp_panel_get(struct dp_panel_in *in)
|
||||
|
@ -139,7 +139,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
|
||||
.disable = dsi_20nm_phy_disable,
|
||||
.init = msm_dsi_phy_init_common,
|
||||
},
|
||||
.io_start = { 0xfd998300, 0xfd9a0300 },
|
||||
.io_start = { 0xfd998500, 0xfd9a0500 },
|
||||
.num_dsi_phy = 2,
|
||||
};
|
||||
|
||||
|
@ -172,9 +172,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll)
|
||||
|
||||
multiplier = 1 << config->frac_bits;
|
||||
dec_multiple = div_u64(pll_freq * multiplier, divider);
|
||||
div_u64_rem(dec_multiple, multiplier, &frac);
|
||||
|
||||
dec = div_u64(dec_multiple, multiplier);
|
||||
dec = div_u64_rem(dec_multiple, multiplier, &frac);
|
||||
|
||||
if (pll_freq <= 1900000000UL)
|
||||
regs->pll_prop_gain_rate = 8;
|
||||
@ -306,7 +304,8 @@ static void dsi_pll_commit(struct dsi_pll_10nm *pll)
|
||||
reg->frac_div_start_mid);
|
||||
pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
|
||||
reg->frac_div_start_high);
|
||||
pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
|
||||
pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1,
|
||||
reg->pll_lockdet_rate);
|
||||
pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
|
||||
pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
|
||||
pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
|
||||
@ -345,6 +344,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
|
||||
|
||||
static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
|
||||
{
|
||||
struct device *dev = &pll->pdev->dev;
|
||||
int rc;
|
||||
u32 status = 0;
|
||||
u32 const delay_us = 100;
|
||||
@ -357,8 +357,8 @@ static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
|
||||
delay_us,
|
||||
timeout_us);
|
||||
if (rc)
|
||||
pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
|
||||
pll->id, status);
|
||||
DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
|
||||
pll->id, status);
|
||||
|
||||
return rc;
|
||||
}
|
||||
@ -405,6 +405,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
|
||||
{
|
||||
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
|
||||
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
|
||||
struct device *dev = &pll_10nm->pdev->dev;
|
||||
int rc;
|
||||
|
||||
dsi_pll_enable_pll_bias(pll_10nm);
|
||||
@ -413,7 +414,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
|
||||
|
||||
rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0);
|
||||
if (rc) {
|
||||
pr_err("vco_set_rate failed, rc=%d\n", rc);
|
||||
DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -430,7 +431,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
|
||||
/* Check for PLL lock */
|
||||
rc = dsi_pll_10nm_lock_status(pll_10nm);
|
||||
if (rc) {
|
||||
pr_err("PLL(%d) lock failed\n", pll_10nm->id);
|
||||
DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->id);
|
||||
goto error;
|
||||
}
|
||||
|
||||
@ -483,6 +484,7 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
|
||||
{
|
||||
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
|
||||
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
|
||||
struct dsi_pll_config *config = &pll_10nm->pll_configuration;
|
||||
void __iomem *base = pll_10nm->mmio;
|
||||
u64 ref_clk = pll_10nm->vco_ref_clk_rate;
|
||||
u64 vco_rate = 0x0;
|
||||
@@ -503,9 +505,8 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
 	/*
 	 * TODO:
 	 *	1. Assumes prescaler is disabled
-	 *	2. Multiplier is 2^18. it should be 2^(num_of_frac_bits)
 	 */
-	multiplier = 1 << 18;
+	multiplier = 1 << config->frac_bits;
 	pll_freq = dec * (ref_clk * 2);
 	tmp64 = (ref_clk * 2 * frac);
 	pll_freq += div_u64(tmp64, multiplier);
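
As a quick sanity check of the dec/frac arithmetic touched by the two dsi_pll_10nm hunks above (illustrative only; assumes a 19.2 MHz reference clock, a disabled prescaler and 18 fractional bits): for a 1.5 GHz VCO target, divider = 2 * 19.2 MHz = 38.4 MHz and multiplier = 2^18, so dec_multiple = 10240000, giving dec = 39 and frac = 16384; the recalc path then recovers 39 * 38.4 MHz + (38.4 MHz * 16384) / 2^18 = 1.5 GHz exactly. A standalone userspace sketch of the same computation:

    #include <inttypes.h>
    #include <stdio.h>

    /* Illustrative check, not kernel code. */
    int main(void)
    {
            uint64_t ref_clk = 19200000ULL;         /* Hz */
            uint64_t pll_freq = 1500000000ULL;      /* target VCO rate, Hz */
            uint64_t divider = ref_clk * 2;         /* prescaler disabled */
            uint64_t multiplier = 1ULL << 18;       /* 2^frac_bits */

            /* forward path, as in dsi_pll_calc_dec_frac() */
            uint64_t dec_multiple = pll_freq * multiplier / divider;
            uint64_t dec = dec_multiple / multiplier;       /* 39 */
            uint64_t frac = dec_multiple % multiplier;      /* 16384 */

            /* reverse path, as in dsi_pll_10nm_vco_recalc_rate() */
            uint64_t vco = dec * divider + (divider * frac) / multiplier;

            printf("dec=%" PRIu64 " frac=%" PRIu64 " vco=%" PRIu64 "\n",
                   dec, frac, vco);                 /* vco == 1500000000 */
            return 0;
    }
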
@@ -788,9 +788,10 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
 		struct drm_file *file, struct drm_gem_object *obj,
 		uint64_t *iova)
 {
+	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_file_private *ctx = file->driver_priv;
 
-	if (!ctx->aspace)
+	if (!priv->gpu)
 		return -EINVAL;
 
 	/*
@@ -987,8 +987,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 		/* Don't drop the pages for imported dmabuf, as they are not
 		 * ours, just free the array we allocated:
 		 */
-		if (msm_obj->pages)
-			kvfree(msm_obj->pages);
+		kvfree(msm_obj->pages);
 
 		put_iova_vmas(obj);
 
@@ -198,6 +198,8 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
 		submit->cmd[i].idx = submit_cmd.submit_idx;
 		submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;
 
+		userptr = u64_to_user_ptr(submit_cmd.relocs);
+
 		sz = array_size(submit_cmd.nr_relocs,
 				sizeof(struct drm_msm_gem_submit_reloc));
 		/* check for overflow: */
@@ -157,6 +157,7 @@ struct msm_kms {
 	 * from the crtc's pending_timer close to end of the frame:
 	 */
 	struct mutex commit_lock[MAX_CRTCS];
+	struct lock_class_key commit_lock_keys[MAX_CRTCS];
 	unsigned pending_crtc_mask;
 	struct msm_pending_timer pending_timers[MAX_CRTCS];
 };
@@ -166,8 +167,11 @@ static inline int msm_kms_init(struct msm_kms *kms,
 {
 	unsigned i, ret;
 
-	for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
-		mutex_init(&kms->commit_lock[i]);
+	for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) {
+		lockdep_register_key(&kms->commit_lock_keys[i]);
+		__mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]",
+			     &kms->commit_lock_keys[i]);
+	}
 
 	kms->funcs = funcs;
 
@@ -265,7 +265,8 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
 	dsi->lanes = 1;
 	dsi->format = MIPI_DSI_FMT_RGB888;
 	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
-			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET |
+			  MIPI_DSI_CLOCK_NON_CONTINUOUS;
 
 	drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs,
 		       DRM_MODE_CONNECTOR_DSI);
@@ -17,9 +17,20 @@
 
 #define NUM_YUV2YUV_COEFFICIENTS 12
 
+/* AFBC supports a number of configurable modes. Relevant to us is block size
+ * (16x16 or 32x8), storage modifiers (SPARSE, SPLIT), and the YUV-like
+ * colourspace transform (YTR). 16x16 SPARSE mode is always used. SPLIT mode
+ * could be enabled via the hreg_block_split register, but is not currently
+ * handled. The colourspace transform is implicitly always assumed by the
+ * decoder, so consumers must use this transform as well.
+ *
+ * Failure to match modifiers will cause errors displaying AFBC buffers
+ * produced by conformant AFBC producers, including Mesa.
+ */
 #define ROCKCHIP_AFBC_MOD \
 	DRM_FORMAT_MOD_ARM_AFBC( \
 		AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | AFBC_FORMAT_MOD_SPARSE \
+			| AFBC_FORMAT_MOD_YTR \
 	)
 
 enum vop_data_format {
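
The comment added above spells out the contract for producers; on the consumer side the buffer's modifier can be matched against the same drm_fourcc.h macros. A minimal hypothetical check (the helper name is made up, not part of the patch):

    #include <linux/types.h>
    #include <drm/drm_fourcc.h>

    /* Hypothetical helper: accept only the exact 16x16 SPARSE + YTR AFBC
     * layout that the VOP decodes.
     */
    static bool is_supported_rockchip_afbc(u64 modifier)
    {
            return modifier == DRM_FORMAT_MOD_ARM_AFBC(
                            AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
                            AFBC_FORMAT_MOD_SPARSE |
                            AFBC_FORMAT_MOD_YTR);
    }
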
@@ -959,8 +959,10 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
 		return ret;
 	/* move to the bounce domain */
 	ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
-	if (ret)
+	if (ret) {
+		ttm_resource_free(bo, &hop_mem);
 		return ret;
+	}
 	return 0;
 }
 
@@ -991,18 +993,19 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	 * stop and the driver will be called to make
 	 * the second hop.
 	 */
-bounce:
 	ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
 	if (ret)
 		return ret;
+bounce:
 	ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
 	if (ret == -EMULTIHOP) {
 		ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
 		if (ret)
-			return ret;
+			goto out;
 		/* try and move to final place now. */
 		goto bounce;
 	}
+out:
 	if (ret)
 		ttm_resource_free(bo, &mem);
 	return ret;
@@ -175,6 +175,15 @@ u32 aty_ld_lcd(int index, const struct atyfb_par *par)
 		return aty_ld_le32(LCD_DATA, par);
 	}
 }
+#else /* defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_BACKLIGHT) \
+	 defined(CONFIG_FB_ATY_GENERIC_LCD) */
+void aty_st_lcd(int index, u32 val, const struct atyfb_par *par)
+{ }
+
+u32 aty_ld_lcd(int index, const struct atyfb_par *par)
+{
+	return 0;
+}
 #endif /* defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) */
 
 #ifdef CONFIG_FB_ATY_GENERIC_LCD
@@ -399,6 +399,9 @@ void drm_event_cancel_free(struct drm_device *dev,
 			   struct drm_pending_event *p);
 void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
 void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
+void drm_send_event_timestamp_locked(struct drm_device *dev,
+				     struct drm_pending_event *e,
+				     ktime_t timestamp);
 
 struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);
 
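
drm_send_event_timestamp_locked() is the new core helper behind the vblank timestamp change: a caller that already holds dev->event_lock can hand over the time the event actually occurred instead of the time it is delivered. A rough sketch of the call pattern, with an illustrative wrapper rather than the real drm_vblank.c code:

    #include <linux/ktime.h>
    #include <linux/spinlock.h>
    #include <drm/drm_device.h>
    #include <drm/drm_file.h>
    #include <drm/drm_vblank.h>

    /* Illustrative wrapper only: deliver a pending vblank event stamped with
     * the time the vblank actually happened.
     */
    static void send_stamped_vblank_event(struct drm_device *dev,
                                          struct drm_pending_vblank_event *e,
                                          ktime_t vblank_time)
    {
            unsigned long flags;

            spin_lock_irqsave(&dev->event_lock, flags);
            drm_send_event_timestamp_locked(dev, &e->base, vblank_time);
            spin_unlock_irqrestore(&dev->event_lock, flags);
    }
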
@@ -372,6 +372,9 @@ static inline void __dma_fence_might_wait(void) {}
 
 int dma_fence_signal(struct dma_fence *fence);
 int dma_fence_signal_locked(struct dma_fence *fence);
+int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
+int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
+				      ktime_t timestamp);
 signed long dma_fence_default_wait(struct dma_fence *fence,
 				   bool intr, signed long timeout);
 int dma_fence_add_callback(struct dma_fence *fence,
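
The two new prototypes let a signaling driver stamp the fence with a timestamp it captured itself (for example at the vblank interrupt) instead of the ktime_get() value taken inside dma_fence_signal(). A minimal sketch of the intended use; the function and variable names are illustrative, not from the patch:

    #include <linux/dma-fence.h>
    #include <linux/ktime.h>

    /* Illustrative only: signal a completed fence using the time the hardware
     * event was observed, e.g. recorded in the vblank interrupt handler.
     */
    static void my_driver_complete_fence(struct dma_fence *fence,
                                         ktime_t hw_timestamp)
    {
            /* returns -EINVAL if the fence was already signalled */
            dma_fence_signal_timestamp(fence, hw_timestamp);
    }
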
@@ -16,15 +16,15 @@ struct dma_heap;
 
 /**
  * struct dma_heap_ops - ops to operate on a given heap
- * @allocate:		allocate dmabuf and return fd
+ * @allocate:		allocate dmabuf and return struct dma_buf ptr
  *
- * allocate returns dmabuf fd on success, -errno on error.
+ * allocate returns dmabuf on success, ERR_PTR(-errno) on error.
  */
 struct dma_heap_ops {
-	int (*allocate)(struct dma_heap *heap,
-			unsigned long len,
-			unsigned long fd_flags,
-			unsigned long heap_flags);
+	struct dma_buf *(*allocate)(struct dma_heap *heap,
+				    unsigned long len,
+				    unsigned long fd_flags,
+				    unsigned long heap_flags);
 };
 
 /**
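
Under the reworked hook a heap now returns the exported struct dma_buf (or an ERR_PTR) and leaves the fd installation to the dma-heap core. A condensed sketch of an allocate callback against the new signature; the my_heap_* helpers, buffer type and ops are placeholders, not code from this series:

    #include <linux/dma-buf.h>
    #include <linux/dma-heap.h>
    #include <linux/err.h>

    static struct dma_buf *my_heap_allocate(struct dma_heap *heap,
                                            unsigned long len,
                                            unsigned long fd_flags,
                                            unsigned long heap_flags)
    {
            DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
            struct my_heap_buffer *buffer;
            struct dma_buf *dmabuf;

            buffer = my_heap_buffer_create(heap, len);      /* placeholder */
            if (IS_ERR(buffer))
                    return ERR_CAST(buffer);

            exp_info.exp_name = dma_heap_get_name(heap);
            exp_info.ops = &my_heap_buf_ops;                /* placeholder */
            exp_info.size = len;
            exp_info.flags = fd_flags;
            exp_info.priv = buffer;

            dmabuf = dma_buf_export(&exp_info);
            if (IS_ERR(dmabuf))
                    my_heap_buffer_destroy(buffer);         /* placeholder */

            /* on success the dma-heap core turns this into an fd for userspace */
            return dmabuf;
    }
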