drm fixes for v6.3-rc3
Merge tag 'drm-fixes-2023-03-17' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Seems like a pretty regular rc3, i915 and amdgpu with the usual
  selection of fixes, then a scattering of fixes across misc drivers
  and other areas:

  accel:
   - build fix for accel

  edid:
   - fix info leak in edid

  ttm:
   - fix NULL ptr deref
   - reference counting fix

  i915:
   - Fix hwmon PL1 power limit enabling
   - Fix audio ELD handling for DP MST
   - Fix PSR io and wake line calculations
   - Fix DG2 HDMI modes with 267.30 and 319.89 MHz pixel clocks
   - Fix SSEU subslice out-of-bounds access
   - Fix misuse of non-idle barriers as fence trackers

  amdgpu:
   - SMU 13 update
   - RDNA2 suspend/resume fix when overclocking is enabled
   - SRIOV VCN fixes
   - HDCP suspend/resume fix
   - Fix drm polling splat regression
   - Fix dirty rectangle tracking for PSR
   - Fix vangogh regression on certain BIOSes
   - Misc display fixes
   - Suspend/resume IOMMU regression fix

  amdkfd:
   - Fix BO offset for multi-VMA page migration
   - Fix a possible double free
   - Fix potential use after free
   - Fix process cleanup on module exit

  bridge:
   - fix returned array size name documentation

  fbdev:
   - ref-counting fix for fbdev deferred I/O

  virtio:
   - dma sync fix

  shmem-helper:
   - error path fix

  msm:
   - shrinker blocking fix

  panfrost:
   - shrinker rpm fix

  chipsfb:
   - fix error code

  meson:
   - fix 1px pink line
   - fix regulator interaction

  sun4i:
   - fix missing component unbind"

* tag 'drm-fixes-2023-03-17' of git://anongit.freedesktop.org/drm/drm: (38 commits)
  drm/ttm: drop extra ttm_bo_put in ttm_bo_cleanup_refs
  drm/amdgpu: Don't resume IOMMU after incomplete init
  drm/amdkfd: Fixed kfd_process cleanup on module exit.
  drm/amd/display: disconnect MPCC only on OTG change
  drm/amd/display: Fix DP MST sinks removal issue
  drm/amd/display: Do not set DRR on pipe Commit
  drm/amd/display: Remove OTG DIV register write for Virtual signals.
  drm/meson: dw-hdmi: Fix devm_regulator_*get_enable*() conversion again
  drm/bridge: Fix returned array size name for atomic_get_input_bus_fmts kdoc
  drm/amdgpu/vcn: Disable indirect SRAM on Vangogh broken BIOSes
  drm/amdgpu/nv: fix codec array for SR_IOV
  drm/amd/display: Write to correct dirty_rect
  drm/amdgpu: move poll enabled/disable into non DC path
  drm/amd/display: Fix HDCP failing to enable after suspend
  drm/amdkfd: fix potential kgd_mem UAFs
  drm/amdgpu/vcn: custom video info caps for sriov
  drm/amd/pm: Fix sienna cichlid incorrect OD volage after resume
  drm/amd/pm: bump SMU 13.0.4 driver_if header version
  drm/amdkfd: fix a potential double free in pqm_create_queue
  drm/amdkfd: Get prange->offset after svm_range_vram_node_new
  ...
commit 2cf5a401c8
@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
-obj-y += habanalabs/
-obj-y += ivpu/
+obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/
+obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/
@@ -4145,8 +4145,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
 		DRM_WARN("smart shift update failed\n");
 
-	drm_kms_helper_poll_disable(dev);
-
 	if (fbcon)
 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
 
@@ -4243,8 +4241,6 @@ exit:
 	if (fbcon)
 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
 
-	drm_kms_helper_poll_enable(dev);
-
 	amdgpu_ras_resume(adev);
 
 	if (adev->mode_info.num_crtc) {
@@ -1618,6 +1618,8 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
 	struct drm_connector_list_iter iter;
 	int r;
 
+	drm_kms_helper_poll_disable(dev);
+
 	/* turn off display hw */
 	drm_modeset_lock_all(dev);
 	drm_connector_list_iter_begin(dev, &iter);
@@ -1694,6 +1696,8 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)
 
 	drm_modeset_unlock_all(dev);
 
+	drm_kms_helper_poll_enable(dev);
+
 	return 0;
 }
 
@@ -26,6 +26,7 @@
 
 #include <linux/firmware.h>
 #include <linux/module.h>
+#include <linux/dmi.h>
 #include <linux/pci.h>
 #include <linux/debugfs.h>
 #include <drm/drm_drv.h>
@@ -114,6 +115,24 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 	    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 		adev->vcn.indirect_sram = true;
 
+	/*
+	 * Some Steam Deck's BIOS versions are incompatible with the
+	 * indirect SRAM mode, leading to amdgpu being unable to get
+	 * properly probed (and even potentially crashing the kernel).
+	 * Hence, check for these versions here - notice this is
+	 * restricted to Vangogh (Deck's APU).
+	 */
+	if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 2)) {
+		const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
+
+		if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
+		    !strncmp("F7A0114", bios_ver, 7))) {
+			adev->vcn.indirect_sram = false;
+			dev_info(adev->dev,
+				 "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
+		}
+	}
+
 	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
 	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
 
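For context, BIOS quirks like the one in this hunk (which appears to come from amdgpu_vcn.c) are sometimes expressed as a dmi_system_id match table instead of open-coded strncmp() checks. A minimal sketch of that alternative shape follows; the table name and the wiring comment are hypothetical, and note that dmi_check_system() does substring matching rather than the prefix comparison the patch uses, so this is purely a shape illustration, not the committed code:

#include <linux/dmi.h>

/* Hypothetical reformulation of the Vangogh BIOS denylist above. */
static const struct dmi_system_id vcn_indirect_sram_denylist[] = {
	{ .matches = { DMI_MATCH(DMI_BIOS_VERSION, "F7A0113") } },
	{ .matches = { DMI_MATCH(DMI_BIOS_VERSION, "F7A0114") } },
	{ }	/* terminator */
};

/* usage sketch:
 *	if (dmi_check_system(vcn_indirect_sram_denylist))
 *		adev->vcn.indirect_sram = false;
 */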
@@ -124,6 +124,8 @@ enum AMDGIM_FEATURE_FLAG {
 	AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
 	/* Indirect Reg Access enabled */
 	AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5),
+	/* AV1 Support MODE*/
+	AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6),
 };
 
 enum AMDGIM_REG_ACCESS_FLAG {
@@ -322,6 +324,8 @@ static inline bool is_virtual_machine(void)
 	((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
 #define amdgpu_sriov_is_normal(adev) \
 	((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
+#define amdgpu_sriov_is_av1_support(adev) \
+	((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT)
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
@@ -93,7 +93,8 @@ union amd_sriov_msg_feature_flags {
 		uint32_t mm_bw_management  : 1;
 		uint32_t pp_one_vf_mode    : 1;
 		uint32_t reg_indirect_acc  : 1;
-		uint32_t reserved          : 26;
+		uint32_t av1_support       : 1;
+		uint32_t reserved          : 25;
 	} flags;
 	uint32_t all;
 };
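Because this union overlays the bitfield with a single uint32_t all, the new 1-bit av1_support has to be carved out of reserved (26 -> 25) so the host/guest message layout stays exactly 32 bits wide. A quick compile-time check in the same spirit, as a standalone sketch (the simplified union and the assertion are illustrative, not part of the patch):

#include <stdint.h>
#include <assert.h>

union feature_flags {			/* simplified stand-in for amd_sriov_msg_feature_flags */
	struct {
		uint32_t other_bits  : 6;
		uint32_t av1_support : 1;
		uint32_t reserved    : 25;	/* 6 + 1 + 25 == 32 */
	} flags;
	uint32_t all;
};

/* If a new flag is added without shrinking 'reserved', this fires. */
static_assert(sizeof(union feature_flags) == sizeof(uint32_t),
	      "feature flags must stay a single 32-bit word");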
@@ -1055,8 +1055,8 @@ static int nv_common_late_init(void *handle)
 			amdgpu_virt_update_sriov_video_codec(adev,
 							     sriov_sc_video_codecs_encode_array,
 							     ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
-							     sriov_sc_video_codecs_decode_array_vcn1,
-							     ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1));
+							     sriov_sc_video_codecs_decode_array_vcn0,
+							     ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn0));
 		}
 	}
 
@@ -102,6 +102,59 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 =
 	.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1,
 };
 
+/* SRIOV SOC21, not const since data is controlled by host */
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+};
+
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn0 = {
+	.codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0),
+	.codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn0,
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn1 = {
+	.codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1),
+	.codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn1,
+};
+
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn0[] = {
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+};
+
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn1[] = {
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn0 = {
+	.codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0),
+	.codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn0,
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn1 = {
+	.codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1),
+	.codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn1,
+};
+
 static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
 				    const struct amdgpu_video_codecs **codecs)
 {
@@ -112,16 +165,31 @@ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
 	case IP_VERSION(4, 0, 0):
 	case IP_VERSION(4, 0, 2):
 	case IP_VERSION(4, 0, 4):
-		if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
-			if (encode)
-				*codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
-			else
-				*codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
+		if (amdgpu_sriov_vf(adev)) {
+			if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
+			    !amdgpu_sriov_is_av1_support(adev)) {
+				if (encode)
+					*codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn1;
+				else
+					*codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn1;
+			} else {
+				if (encode)
+					*codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn0;
+				else
+					*codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn0;
+			}
 		} else {
-			if (encode)
-				*codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
-			else
-				*codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
+			if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)) {
+				if (encode)
+					*codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
+				else
+					*codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
+			} else {
+				if (encode)
+					*codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
+				else
+					*codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
+			}
 		}
 		return 0;
 	default:
@@ -730,8 +798,23 @@ static int soc21_common_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (amdgpu_sriov_vf(adev))
+	if (amdgpu_sriov_vf(adev)) {
 		xgpu_nv_mailbox_get_irq(adev);
+		if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
+		    !amdgpu_sriov_is_av1_support(adev)) {
+			amdgpu_virt_update_sriov_video_codec(adev,
+							     sriov_vcn_4_0_0_video_codecs_encode_array_vcn1,
+							     ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1),
+							     sriov_vcn_4_0_0_video_codecs_decode_array_vcn1,
+							     ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1));
+		} else {
+			amdgpu_virt_update_sriov_video_codec(adev,
+							     sriov_vcn_4_0_0_video_codecs_encode_array_vcn0,
+							     ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0),
+							     sriov_vcn_4_0_0_video_codecs_decode_array_vcn0,
+							     ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0));
+		}
+	}
 
 	return 0;
 }
@@ -1312,14 +1312,14 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 		args->n_success = i+1;
 	}
 
-	mutex_unlock(&p->mutex);
-
 	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
 	if (err) {
 		pr_debug("Sync memory failed, wait interrupted by user signal\n");
 		goto sync_memory_failed;
 	}
 
+	mutex_unlock(&p->mutex);
+
 	/* Flush TLBs after waiting for the page table updates to complete */
 	for (i = 0; i < args->n_devices; i++) {
 		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
@@ -1335,9 +1335,9 @@ get_process_device_data_failed:
 bind_process_to_device_failed:
 get_mem_obj_from_handle_failed:
 map_memory_to_gpu_failed:
+sync_memory_failed:
 	mutex_unlock(&p->mutex);
 copy_from_user_failed:
-sync_memory_failed:
 	kfree(devices_arr);
 
 	return err;
@@ -1351,6 +1351,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 	void *mem;
 	long err = 0;
 	uint32_t *devices_arr = NULL, i;
+	bool flush_tlb;
 
 	if (!args->n_devices) {
 		pr_debug("Device IDs array empty\n");
@@ -1403,16 +1404,19 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 		}
 		args->n_success = i+1;
 	}
-	mutex_unlock(&p->mutex);
 
-	if (kfd_flush_tlb_after_unmap(pdd->dev)) {
+	flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev);
+	if (flush_tlb) {
 		err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
 				(struct kgd_mem *) mem, true);
 		if (err) {
 			pr_debug("Sync memory failed, wait interrupted by user signal\n");
 			goto sync_memory_failed;
 		}
 	}
+	mutex_unlock(&p->mutex);
 
+	if (flush_tlb) {
 		/* Flush TLBs after waiting for the page table updates to complete */
 		for (i = 0; i < args->n_devices; i++) {
 			peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
@@ -1428,9 +1432,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 bind_process_to_device_failed:
 get_mem_obj_from_handle_failed:
 unmap_memory_from_gpu_failed:
+sync_memory_failed:
 	mutex_unlock(&p->mutex);
 copy_from_user_failed:
-sync_memory_failed:
 	kfree(devices_arr);
 	return err;
 }
@@ -59,6 +59,7 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
 			       unsigned int chunk_size);
 static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
 
+static int kfd_resume_iommu(struct kfd_dev *kfd);
 static int kfd_resume(struct kfd_dev *kfd);
 
 static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
@@ -624,7 +625,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 
 	svm_migrate_init(kfd->adev);
 
-	if (kgd2kfd_resume_iommu(kfd))
+	if (kfd_resume_iommu(kfd))
 		goto device_iommu_error;
 
 	if (kfd_resume(kfd))
@@ -772,6 +773,14 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 }
 
 int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
+{
+	if (!kfd->init_complete)
+		return 0;
+
+	return kfd_resume_iommu(kfd);
+}
+
+static int kfd_resume_iommu(struct kfd_dev *kfd)
 {
 	int err = 0;
 
@@ -289,7 +289,7 @@ static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
 static int
 svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 			 struct migrate_vma *migrate, struct dma_fence **mfence,
-			 dma_addr_t *scratch)
+			 dma_addr_t *scratch, uint64_t ttm_res_offset)
 {
 	uint64_t npages = migrate->npages;
 	struct device *dev = adev->dev;
@@ -299,19 +299,13 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 	uint64_t i, j;
 	int r;
 
-	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
-		 prange->last);
+	pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
+		 prange->last, ttm_res_offset);
 
 	src = scratch;
 	dst = (uint64_t *)(scratch + npages);
 
-	r = svm_range_vram_node_new(adev, prange, true);
-	if (r) {
-		dev_dbg(adev->dev, "fail %d to alloc vram\n", r);
-		goto out;
-	}
-
-	amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
+	amdgpu_res_first(prange->ttm_res, ttm_res_offset,
 			 npages << PAGE_SHIFT, &cursor);
 	for (i = j = 0; i < npages; i++) {
 		struct page *spage;
@@ -391,14 +385,14 @@ out_free_vram_pages:
 			migrate->dst[i + 3] = 0;
 		}
 #endif
-out:
+
 	return r;
 }
 
 static long
 svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 			struct vm_area_struct *vma, uint64_t start,
-			uint64_t end, uint32_t trigger)
+			uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
 {
 	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
 	uint64_t npages = (end - start) >> PAGE_SHIFT;
@@ -451,7 +445,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 	else
 		pr_debug("0x%lx pages migrated\n", cpages);
 
-	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
+	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch, ttm_res_offset);
 	migrate_vma_pages(&migrate);
 
 	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
@@ -499,6 +493,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
 	unsigned long addr, start, end;
 	struct vm_area_struct *vma;
 	struct amdgpu_device *adev;
+	uint64_t ttm_res_offset;
 	unsigned long cpages = 0;
 	long r = 0;
 
@@ -520,6 +515,13 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
 	start = prange->start << PAGE_SHIFT;
 	end = (prange->last + 1) << PAGE_SHIFT;
 
+	r = svm_range_vram_node_new(adev, prange, true);
+	if (r) {
+		dev_dbg(adev->dev, "fail %ld to alloc vram\n", r);
+		return r;
+	}
+	ttm_res_offset = prange->offset << PAGE_SHIFT;
+
 	for (addr = start; addr < end;) {
 		unsigned long next;
 
@@ -528,18 +530,21 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
 			break;
 
 		next = min(vma->vm_end, end);
-		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger);
+		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset);
 		if (r < 0) {
 			pr_debug("failed %ld to migrate\n", r);
 			break;
 		} else {
 			cpages += r;
 		}
+		ttm_res_offset += next - addr;
 		addr = next;
 	}
 
 	if (cpages)
 		prange->actual_loc = best_loc;
+	else
+		svm_range_vram_node_free(prange);
 
 	return r < 0 ? r : 0;
 }
@@ -77,6 +77,7 @@ err_ioctl:
 
 static void kfd_exit(void)
 {
+	kfd_cleanup_processes();
 	kfd_debugfs_fini();
 	kfd_process_destroy_wq();
 	kfd_procfs_shutdown();
@@ -928,6 +928,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev);
 
 int kfd_process_create_wq(void);
 void kfd_process_destroy_wq(void);
+void kfd_cleanup_processes(void);
 struct kfd_process *kfd_create_process(struct file *filep);
 struct kfd_process *kfd_get_process(const struct task_struct *task);
 struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
@@ -1167,6 +1167,17 @@ static void kfd_process_free_notifier(struct mmu_notifier *mn)
 	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
 }
 
+static void kfd_process_notifier_release_internal(struct kfd_process *p)
+{
+	cancel_delayed_work_sync(&p->eviction_work);
+	cancel_delayed_work_sync(&p->restore_work);
+
+	/* Indicate to other users that MM is no longer valid */
+	p->mm = NULL;
+
+	mmu_notifier_put(&p->mmu_notifier);
+}
+
 static void kfd_process_notifier_release(struct mmu_notifier *mn,
 					struct mm_struct *mm)
 {
@@ -1181,17 +1192,22 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
 		return;
 
 	mutex_lock(&kfd_processes_mutex);
+	/*
+	 * Do early return if table is empty.
+	 *
+	 * This could potentially happen if this function is called concurrently
+	 * by mmu_notifier and by kfd_cleanup_processes.
+	 *
+	 */
+	if (hash_empty(kfd_processes_table)) {
+		mutex_unlock(&kfd_processes_mutex);
+		return;
+	}
 	hash_del_rcu(&p->kfd_processes);
 	mutex_unlock(&kfd_processes_mutex);
 	synchronize_srcu(&kfd_processes_srcu);
 
-	cancel_delayed_work_sync(&p->eviction_work);
-	cancel_delayed_work_sync(&p->restore_work);
-
-	/* Indicate to other users that MM is no longer valid */
-	p->mm = NULL;
-
-	mmu_notifier_put(&p->mmu_notifier);
+	kfd_process_notifier_release_internal(p);
 }
 
 static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
@@ -1200,6 +1216,43 @@ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
 	.free_notifier = kfd_process_free_notifier,
 };
 
+/*
+ * This code handles the case when driver is being unloaded before all
+ * mm_struct are released.  We need to safely free the kfd_process and
+ * avoid race conditions with mmu_notifier that might try to free them.
+ *
+ */
+void kfd_cleanup_processes(void)
+{
+	struct kfd_process *p;
+	struct hlist_node *p_temp;
+	unsigned int temp;
+	HLIST_HEAD(cleanup_list);
+
+	/*
+	 * Move all remaining kfd_process from the process table to a
+	 * temp list for processing.  Once done, callback from mmu_notifier
+	 * release will not see the kfd_process in the table and do early return,
+	 * avoiding double free issues.
+	 */
+	mutex_lock(&kfd_processes_mutex);
+	hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
+		hash_del_rcu(&p->kfd_processes);
+		synchronize_srcu(&kfd_processes_srcu);
+		hlist_add_head(&p->kfd_processes, &cleanup_list);
+	}
+	mutex_unlock(&kfd_processes_mutex);
+
+	hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
+		kfd_process_notifier_release_internal(p);
+
+	/*
+	 * Ensures that all outstanding free_notifier get called, triggering
+	 * the release of the kfd_process struct.
+	 */
+	mmu_notifier_synchronize();
+}
+
 static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
 {
 	unsigned long offset;
@@ -218,8 +218,8 @@ static int init_user_queue(struct process_queue_manager *pqm,
 	return 0;
 
 cleanup:
-	if (dev->shared_resources.enable_mes)
-		uninit_queue(*q);
+	uninit_queue(*q);
+	*q = NULL;
 	return retval;
 }
 
@@ -5105,9 +5105,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
 
 		for (; flip_addrs->dirty_rect_count < num_clips; clips++)
 			fill_dc_dirty_rect(new_plane_state->plane,
-					   &dirty_rects[i], clips->x1,
-					   clips->y1, clips->x2 - clips->x1,
-					   clips->y2 - clips->y1,
+					   &dirty_rects[flip_addrs->dirty_rect_count],
+					   clips->x1, clips->y1,
+					   clips->x2 - clips->x1, clips->y2 - clips->y1,
 					   &flip_addrs->dirty_rect_count,
 					   false);
 		return;
@@ -561,7 +561,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
 	link->dp.mst_enabled = config->mst_enabled;
 	link->dp.usb4_enabled = config->usb4_enabled;
 	display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
-	link->adjust.auth_delay = 0;
+	link->adjust.auth_delay = 2;
 	link->adjust.hdcp1.disable = 0;
 	conn_state = aconnector->base.state;
 
@@ -998,8 +998,5 @@ void dcn30_prepare_bandwidth(struct dc *dc,
 			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
 
 	dcn20_prepare_bandwidth(dc, context);
-
-	dc_dmub_srv_p_state_delegate(dc,
-		context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);
 }
 
@@ -1104,7 +1104,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
 			*k2_div = PIXEL_RATE_DIV_BY_2;
 		else
 			*k2_div = PIXEL_RATE_DIV_BY_4;
-	} else if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
+	} else if (dc_is_dp_signal(stream->signal)) {
 		if (two_pix_per_container) {
 			*k1_div = PIXEL_RATE_DIV_BY_1;
 			*k2_div = PIXEL_RATE_DIV_BY_2;
@@ -1915,6 +1915,7 @@ int dcn32_populate_dml_pipes_from_context(
 	bool subvp_in_use = false;
 	uint8_t is_pipe_split_expected[MAX_PIPES] = {0};
 	struct dc_crtc_timing *timing;
+	bool vsr_odm_support = false;
 
 	dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
 
@@ -1932,12 +1933,15 @@ int dcn32_populate_dml_pipes_from_context(
 		timing = &pipe->stream->timing;
 
 		pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+		vsr_odm_support = (res_ctx->pipe_ctx[i].stream->src.width >= 5120 &&
+				res_ctx->pipe_ctx[i].stream->src.width > res_ctx->pipe_ctx[i].stream->dst.width);
 		if (context->stream_count == 1 &&
 				context->stream_status[0].plane_count == 1 &&
 				!dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
 				is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) &&
 				pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ &&
-				dc->debug.enable_single_display_2to1_odm_policy) {
+				dc->debug.enable_single_display_2to1_odm_policy &&
+				!vsr_odm_support) { //excluding 2to1 ODM combine on >= 5k vsr
 			pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
 		}
 		pipe_cnt++;
@@ -855,6 +855,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
 	struct dc_sink *prev_sink = NULL;
 	struct dpcd_caps prev_dpcd_caps;
 	enum dc_connection_type new_connection_type = dc_connection_none;
+	enum dc_connection_type pre_connection_type = link->type;
 	const uint32_t post_oui_delay = 30; // 30ms
 
 	DC_LOGGER_INIT(link->ctx->logger);
@@ -957,6 +958,8 @@ static bool detect_link_and_local_sink(struct dc_link *link,
 		}
 
 		if (!detect_dp(link, &sink_caps, reason)) {
+			link->type = pre_connection_type;
+
 			if (prev_sink)
 				dc_sink_release(prev_sink);
 			return false;
@@ -1244,11 +1247,16 @@ bool link_detect(struct dc_link *link, enum dc_detect_reason reason)
 	bool is_delegated_to_mst_top_mgr = false;
 	enum dc_connection_type pre_link_type = link->type;
 
+	DC_LOGGER_INIT(link->ctx->logger);
+
 	is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
 
 	if (is_local_sink_detect_success && link->local_sink)
 		verify_link_capability(link, link->local_sink, reason);
 
+	DC_LOG_DC("%s: link_index=%d is_local_sink_detect_success=%d pre_link_type=%d link_type=%d\n", __func__,
+			link->link_index, is_local_sink_detect_success, pre_link_type, link->type);
+
 	if (is_local_sink_detect_success && link->local_sink &&
 			dc_is_dp_signal(link->local_sink->sink_signal) &&
 			link->dpcd_caps.is_mst_capable)
@@ -27,7 +27,7 @@
 // *** IMPORTANT ***
 // SMU TEAM: Always increment the interface version if
 // any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 7
+#define PMFW_DRIVER_IF_VERSION 8
 
 typedef struct {
   int32_t value;
@@ -198,7 +198,7 @@ typedef struct {
   uint16_t SkinTemp;
   uint16_t DeviceState;
   uint16_t CurTemp;                     //[centi-Celsius]
-  uint16_t spare2;
+  uint16_t FilterAlphaValue;
 
   uint16_t AverageGfxclkFrequency;
   uint16_t AverageFclkFrequency;
@@ -29,7 +29,7 @@
 #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
 #define SMU13_DRIVER_IF_VERSION_ALDE 0x08
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x08
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37
@@ -2143,16 +2143,9 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
 		(OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
 	OverDriveTable_t *user_od_table =
 		(OverDriveTable_t *)smu->smu_table.user_overdrive_table;
+	OverDriveTable_t user_od_table_bak;
 	int ret = 0;
 
-	/*
-	 * For S3/S4/Runpm resume, no need to setup those overdrive tables again as
-	 *   - either they already have the default OD settings got during cold bootup
-	 *   - or they have some user customized OD settings which cannot be overwritten
-	 */
-	if (smu->adev->in_suspend)
-		return 0;
-
 	ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE,
 				   0, (void *)boot_od_table, false);
 	if (ret) {
@@ -2163,7 +2156,23 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
 	sienna_cichlid_dump_od_table(smu, boot_od_table);
 
 	memcpy(od_table, boot_od_table, sizeof(OverDriveTable_t));
-	memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+
+	/*
+	 * For S3/S4/Runpm resume, we need to setup those overdrive tables again,
+	 * but we have to preserve user defined values in "user_od_table".
+	 */
+	if (!smu->adev->in_suspend) {
+		memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+		smu->user_dpm_profile.user_od = false;
+	} else if (smu->user_dpm_profile.user_od) {
+		memcpy(&user_od_table_bak, user_od_table, sizeof(OverDriveTable_t));
+		memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+		user_od_table->GfxclkFmin = user_od_table_bak.GfxclkFmin;
+		user_od_table->GfxclkFmax = user_od_table_bak.GfxclkFmax;
+		user_od_table->UclkFmin = user_od_table_bak.UclkFmin;
+		user_od_table->UclkFmax = user_od_table_bak.UclkFmax;
+		user_od_table->VddGfxOffset = user_od_table_bak.VddGfxOffset;
+	}
 
 	return 0;
 }
@@ -2373,6 +2382,20 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu,
 	return ret;
 }
 
+static int sienna_cichlid_restore_user_od_settings(struct smu_context *smu)
+{
+	struct smu_table_context *table_context = &smu->smu_table;
+	OverDriveTable_t *od_table = table_context->overdrive_table;
+	OverDriveTable_t *user_od_table = table_context->user_overdrive_table;
+	int res;
+
+	res = smu_v11_0_restore_user_od_settings(smu);
+	if (res == 0)
+		memcpy(od_table, user_od_table, sizeof(OverDriveTable_t));
+
+	return res;
+}
+
 static int sienna_cichlid_run_btc(struct smu_context *smu)
 {
 	int res;
@@ -4400,7 +4423,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
 	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
 	.set_default_od_settings = sienna_cichlid_set_default_od_settings,
 	.od_edit_dpm_table = sienna_cichlid_od_edit_dpm_table,
-	.restore_user_od_settings = smu_v11_0_restore_user_od_settings,
+	.restore_user_od_settings = sienna_cichlid_restore_user_od_settings,
 	.run_btc = sienna_cichlid_run_btc,
 	.set_power_source = smu_v11_0_set_power_source,
 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
@@ -2796,7 +2796,7 @@ u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
 	 * the EDID then we'll just return 0.
 	 */
 
-	base_block = kmalloc(EDID_LENGTH, GFP_KERNEL);
+	base_block = kzalloc(EDID_LENGTH, GFP_KERNEL);
 	if (!base_block)
 		return 0;
 
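The info leak this hunk closes is the classic pattern: if the EDID transfer fills the buffer only partially, the untouched tail still holds stale heap bytes that can then escape onward; zero-initializing the allocation makes a short read harmless. A minimal standalone model of the difference, not the drm_edid.c code itself (read_block is a hypothetical callback):

#include <stdlib.h>

#define EDID_LENGTH 128

unsigned char *read_edid(int (*read_block)(unsigned char *, size_t))
{
	/* calloc() plays the role of kzalloc(): any bytes the transfer
	 * does not write stay zero instead of leaking old heap data. */
	unsigned char *buf = calloc(1, EDID_LENGTH);

	if (!buf)
		return NULL;
	if (read_block(buf, EDID_LENGTH) < 0) {
		free(buf);
		return NULL;
	}
	return buf;	/* safe to expose even after a short transfer */
}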
@@ -1388,10 +1388,13 @@ EXPORT_SYMBOL(drm_gem_lru_move_tail);
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
+ * @remaining: The number of pages left to reclaim, should be initialized by caller
 * @shrink: Callback to try to shrink/reclaim the object.
 */
 unsigned long
-drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+drm_gem_lru_scan(struct drm_gem_lru *lru,
+		 unsigned int nr_to_scan,
+		 unsigned long *remaining,
		 bool (*shrink)(struct drm_gem_object *obj))
 {
 	struct drm_gem_lru still_in_lru;
@@ -1430,8 +1433,10 @@ drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
		 * hit shrinker in response to trying to get backing pages
		 * for this obj (ie. while it's lock is already held)
		 */
-		if (!dma_resv_trylock(obj->resv))
+		if (!dma_resv_trylock(obj->resv)) {
+			*remaining += obj->size >> PAGE_SHIFT;
 			goto tail;
+		}
 
 		if (shrink(obj)) {
 			freed += obj->size >> PAGE_SHIFT;
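Callers of the reworked drm_gem_lru_scan() now pass a remaining accumulator so the helper can report pages it saw but could not lock. A minimal caller sketch based only on the signature above; the LRU handle and the shrink callback are placeholders, not real driver code:

#include <drm/drm_gem.h>

/* Hypothetical shrink callback: reclaim the object's pages, return true on success. */
static bool try_shrink(struct drm_gem_object *obj)
{
	return false;	/* placeholder body */
}

static unsigned long scan_one_lru(struct drm_gem_lru *lru, unsigned int nr)
{
	unsigned long remaining = 0;	/* must be initialized by the caller */
	unsigned long freed;

	freed = drm_gem_lru_scan(lru, nr, &remaining, try_shrink);

	/* remaining > 0 means contended objects may still be reclaimable later */
	return freed;
}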
@@ -619,11 +619,14 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
 	int ret;
 
 	if (obj->import_attach) {
-		/* Drop the reference drm_gem_mmap_obj() acquired.*/
-		drm_gem_object_put(obj);
 		vma->vm_private_data = NULL;
+		ret = dma_buf_mmap(obj->dma_buf, vma, 0);
 
-		return dma_buf_mmap(obj->dma_buf, vma, 0);
+		/* Drop the reference drm_gem_mmap_obj() acquired.*/
+		if (!ret)
+			drm_gem_object_put(obj);
+
+		return ret;
 	}
 
 	ret = drm_gem_shmem_get_pages(shmem);
@@ -1631,6 +1631,8 @@ struct intel_psr {
 	bool psr2_sel_fetch_cff_enabled;
 	bool req_psr2_sdp_prior_scanline;
 	u8 sink_sync_latency;
+	u8 io_wake_lines;
+	u8 fast_wake_lines;
 	ktime_t last_entry_attempt;
 	ktime_t last_exit;
 	bool sink_not_reliable;
@@ -265,6 +265,19 @@ static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
 	return 0;
 }
 
+static bool intel_dp_mst_has_audio(const struct drm_connector_state *conn_state)
+{
+	const struct intel_digital_connector_state *intel_conn_state =
+		to_intel_digital_connector_state(conn_state);
+	struct intel_connector *connector =
+		to_intel_connector(conn_state->connector);
+
+	if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+		return connector->port->has_audio;
+	else
+		return intel_conn_state->force_audio == HDMI_AUDIO_ON;
+}
+
 static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
 				       struct intel_crtc_state *pipe_config,
 				       struct drm_connector_state *conn_state)
@@ -272,10 +285,6 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
 	struct intel_dp *intel_dp = &intel_mst->primary->dp;
-	struct intel_connector *connector =
-		to_intel_connector(conn_state->connector);
-	struct intel_digital_connector_state *intel_conn_state =
-		to_intel_digital_connector_state(conn_state);
 	const struct drm_display_mode *adjusted_mode =
 		&pipe_config->hw.adjusted_mode;
 	struct link_config_limits limits;
@@ -287,11 +296,9 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 	pipe_config->has_pch_encoder = false;
 
-	if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
-		pipe_config->has_audio = connector->port->has_audio;
-	else
-		pipe_config->has_audio =
-			intel_conn_state->force_audio == HDMI_AUDIO_ON;
+	pipe_config->has_audio =
+		intel_dp_mst_has_audio(conn_state) &&
+		intel_audio_compute_config(encoder, pipe_config, conn_state);
 
 	/*
 	 * for MST we always configure max link bw - the spec doesn't
@@ -542,6 +542,14 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 	val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
 	val |= intel_psr2_get_tp_time(intel_dp);
 
+	if (DISPLAY_VER(dev_priv) >= 12) {
+		if (intel_dp->psr.io_wake_lines < 9 &&
+		    intel_dp->psr.fast_wake_lines < 9)
+			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+		else
+			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
+	}
+
 	/* Wa_22012278275:adl-p */
 	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
 		static const u8 map[] = {
@@ -558,31 +566,21 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
		 * comments bellow for more information
		 */
-		u32 tmp, lines = 7;
-
-		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+		u32 tmp;
 
-		tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
+		tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
		tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
		val |= tmp;
 
-		tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+		tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
		tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
		val |= tmp;
	} else if (DISPLAY_VER(dev_priv) >= 12) {
-		/*
-		 * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
-		 * values from BSpec. In order to setting an optimal power
-		 * consumption, lower than 4k resolution mode needs to decrease
-		 * IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution
-		 * mode needs to increase IO_BUFFER_WAKE and FAST_WAKE.
-		 */
-		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
-		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
-		val |= TGL_EDP_PSR2_FAST_WAKE(7);
+		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
-		val |= EDP_PSR2_IO_BUFFER_WAKE(7);
-		val |= EDP_PSR2_FAST_WAKE(7);
+		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+		val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
	}
 
	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
@@ -842,6 +840,46 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
 	return true;
 }
 
+static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
+				     struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
+	u8 max_wake_lines;
+
+	if (DISPLAY_VER(i915) >= 12) {
+		io_wake_time = 42;
+		/*
+		 * According to Bspec it's 42us, but based on testing
+		 * it is not enough -> use 45 us.
+		 */
+		fast_wake_time = 45;
+		max_wake_lines = 12;
+	} else {
+		io_wake_time = 50;
+		fast_wake_time = 32;
+		max_wake_lines = 8;
+	}
+
+	io_wake_lines = intel_usecs_to_scanlines(
+		&crtc_state->uapi.adjusted_mode, io_wake_time);
+	fast_wake_lines = intel_usecs_to_scanlines(
+		&crtc_state->uapi.adjusted_mode, fast_wake_time);
+
+	if (io_wake_lines > max_wake_lines ||
+	    fast_wake_lines > max_wake_lines)
+		return false;
+
+	if (i915->params.psr_safest_params)
+		io_wake_lines = fast_wake_lines = max_wake_lines;
+
+	/* According to Bspec lower limit should be set as 7 lines. */
+	intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
+	intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
+
+	return true;
+}
+
 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 				    struct intel_crtc_state *crtc_state)
 {
@@ -936,6 +974,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 		return false;
 	}
 
+	if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR2 not enabled, Unable to use long enough wake times\n");
+		return false;
+	}
+
 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
 		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
 		    !HAS_PSR_HW_TRACKING(dev_priv)) {
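The wake-line computation above is just a microseconds-to-scanlines conversion against the mode's pixel clock and horizontal total: lines = ceil(usecs * pixel_clock_khz / (1000 * htotal)). A worked standalone sketch of that arithmetic, with illustrative mode numbers (this mirrors the shape of intel_usecs_to_scanlines(), not its exact implementation):

#include <stdio.h>

/* lines = ceil(usecs * pixel_clock_khz / (1000 * htotal)) */
static int usecs_to_scanlines(int usecs, int clock_khz, int htotal)
{
	long long num = (long long)usecs * clock_khz;

	return (int)((num + 1000LL * htotal - 1) / (1000LL * htotal));
}

int main(void)
{
	/* 3840x2160@60 (594 MHz, htotal 4400): 42 us -> 6 lines */
	printf("4k60:  %d lines\n", usecs_to_scanlines(42, 594000, 4400));
	/* 1920x1080@60 (148.5 MHz, htotal 2200): 42 us -> 3 lines,
	 * which the driver then raises to the Bspec floor of 7. */
	printf("1080p: %d lines\n", usecs_to_scanlines(42, 148500, 2200));
	return 0;
}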
@@ -1419,6 +1419,36 @@ static const struct intel_mpllb_state dg2_hdmi_262750 = {
 		REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
 };
 
+static const struct intel_mpllb_state dg2_hdmi_267300 = {
+	.clock = 267300,
+	.ref_control =
+		REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+	.mpllb_cp =
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+	.mpllb_div =
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+	.mpllb_div2 =
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+	.mpllb_fracn1 =
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+	.mpllb_fracn2 =
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 30146) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36699),
+	.mpllb_sscen =
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
 static const struct intel_mpllb_state dg2_hdmi_268500 = {
 	.clock = 268500,
 	.ref_control =
@@ -1509,6 +1539,36 @@ static const struct intel_mpllb_state dg2_hdmi_241500 = {
 		REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
 };
 
+static const struct intel_mpllb_state dg2_hdmi_319890 = {
+	.clock = 319890,
+	.ref_control =
+		REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+	.mpllb_cp =
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+	.mpllb_div =
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+	.mpllb_div2 =
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+	.mpllb_fracn1 =
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+	.mpllb_fracn2 =
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 64094) |
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13631),
+	.mpllb_sscen =
+		REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
 static const struct intel_mpllb_state dg2_hdmi_497750 = {
 	.clock = 497750,
 	.ref_control =
@@ -1696,8 +1756,10 @@ static const struct intel_mpllb_state * const dg2_hdmi_tables[] = {
 	&dg2_hdmi_209800,
 	&dg2_hdmi_241500,
 	&dg2_hdmi_262750,
+	&dg2_hdmi_267300,
 	&dg2_hdmi_268500,
 	&dg2_hdmi_296703,
+	&dg2_hdmi_319890,
 	&dg2_hdmi_497750,
 	&dg2_hdmi_592000,
 	&dg2_hdmi_593407,
@@ -27,7 +27,7 @@ struct drm_printer;
 * is only relevant to pre-Xe_HP platforms (Xe_HP and beyond use the
 * I915_MAX_SS_FUSE_BITS value below).
 */
-#define GEN_MAX_SS_PER_HSW_SLICE	6
+#define GEN_MAX_SS_PER_HSW_SLICE	8
 
 /*
 * Maximum number of 32-bit registers used by hardware to express the
@@ -422,12 +422,12 @@ replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
	 * we can use it to substitute for the pending idle-barrer
	 * request that we want to emit on the kernel_context.
	 */
-	__active_del_barrier(ref, node_from_active(active));
-	return true;
+	return __active_del_barrier(ref, node_from_active(active));
 }
 
 int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
 {
+	u64 idx = i915_request_timeline(rq)->fence_context;
 	struct dma_fence *fence = &rq->fence;
 	struct i915_active_fence *active;
 	int err;
@@ -437,16 +437,19 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
 	if (err)
 		return err;
 
-	active = active_instance(ref, i915_request_timeline(rq)->fence_context);
-	if (!active) {
-		err = -ENOMEM;
-		goto out;
-	}
+	do {
+		active = active_instance(ref, idx);
+		if (!active) {
+			err = -ENOMEM;
+			goto out;
+		}
+
+		if (replace_barrier(ref, active)) {
+			RCU_INIT_POINTER(active->fence, NULL);
+			atomic_dec(&ref->count);
+		}
+	} while (unlikely(is_barrier(active)));
 
-	if (replace_barrier(ref, active)) {
-		RCU_INIT_POINTER(active->fence, NULL);
-		atomic_dec(&ref->count);
-	}
 	if (!__i915_active_fence_set(active, fence))
 		__i915_active_acquire(ref);
 
@@ -687,6 +687,11 @@ hwm_get_preregistration_info(struct drm_i915_private *i915)
 		for_each_gt(gt, i915, i)
 			hwm_energy(&hwmon->ddat_gt[i], &energy);
 	}
+
+	/* Enable PL1 power limit */
+	if (i915_mmio_reg_valid(hwmon->rg.pkg_rapl_limit))
+		hwm_locked_with_pm_intel_uncore_rmw(ddat, hwmon->rg.pkg_rapl_limit,
+						    PKG_PWR_LIM_1_EN, PKG_PWR_LIM_1_EN);
 }
 
 void i915_hwmon_register(struct drm_i915_private *i915)
@@ -718,7 +718,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
 	dw_plat_data = &meson_dw_hdmi->dw_plat_data;
 
 	ret = devm_regulator_get_enable_optional(dev, "hdmi");
-	if (ret < 0)
+	if (ret < 0 && ret != -ENODEV)
 		return ret;
 
 	meson_dw_hdmi->hdmitx_apb = devm_reset_control_get_exclusive(dev,
@@ -100,6 +100,8 @@ void meson_vpp_init(struct meson_drm *priv)
 			       priv->io_base + _REG(VPP_DOLBY_CTRL));
 		writel_relaxed(0x1020080,
 				priv->io_base + _REG(VPP_DUMMY_DATA1));
+		writel_relaxed(0x42020,
+				priv->io_base + _REG(VPP_DUMMY_DATA));
 	} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
 		writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL));
 
@@ -107,6 +107,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 		bool (*shrink)(struct drm_gem_object *obj);
 		bool cond;
 		unsigned long freed;
+		unsigned long remaining;
 	} stages[] = {
 		/* Stages of progressively more aggressive/expensive reclaim: */
 		{ &priv->lru.dontneed, purge, true },
@@ -116,14 +117,18 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	};
 	long nr = sc->nr_to_scan;
 	unsigned long freed = 0;
+	unsigned long remaining = 0;
 
 	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
 		if (!stages[i].cond)
 			continue;
 		stages[i].freed =
-			drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
+			drm_gem_lru_scan(stages[i].lru, nr,
+					&stages[i].remaining,
+					 stages[i].shrink);
 		nr -= stages[i].freed;
 		freed += stages[i].freed;
+		remaining += stages[i].remaining;
 	}
 
 	if (freed) {
@@ -132,7 +137,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 			     stages[3].freed);
 	}
 
-	return (freed > 0) ? freed : SHRINK_STOP;
+	return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
 }
 
 #ifdef CONFIG_DEBUG_FS
@@ -182,10 +187,12 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 		NULL,
 	};
 	unsigned idx, unmapped = 0;
+	unsigned long remaining = 0;
 
 	for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
 		unmapped += drm_gem_lru_scan(lrus[idx],
 					     vmap_shrink_limit - unmapped,
					     &remaining,
 					     vmap_shrink);
 	}
 
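The behavioural change here is in the return contract with the core shrinker: the scan callback now reports SHRINK_STOP as soon as there is nothing left that could be reclaimed on a retry, so reclaim stops hammering a shrinker that can make no further progress. A condensed sketch of that decision in isolation (the local SHRINK_STOP define just mirrors the kernel's value so the snippet is self-contained):

#define SHRINK_STOP (~0UL)	/* matches linux/shrinker.h */

/* Condensed form of the scan-callback return logic above. */
static unsigned long scan_result(unsigned long freed, unsigned long remaining)
{
	/*
	 * Keep reporting progress only while contended objects remain
	 * (remaining > 0) that a later pass might still reclaim;
	 * otherwise tell the core to stop calling us.
	 */
	return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
}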
@@ -282,7 +282,7 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
 	if (pm_runtime_active(pfdev->dev))
 		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
 
-	pm_runtime_put_sync_autosuspend(pfdev->dev);
+	pm_runtime_put_autosuspend(pfdev->dev);
 }
 
 static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
@@ -95,12 +95,12 @@ static int sun4i_drv_bind(struct device *dev)
 	/* drm_vblank_init calls kcalloc, which can fail */
 	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
 	if (ret)
-		goto cleanup_mode_config;
+		goto unbind_all;
 
 	/* Remove early framebuffers (ie. simplefb) */
 	ret = drm_aperture_remove_framebuffers(false, &sun4i_drv_driver);
 	if (ret)
-		goto cleanup_mode_config;
+		goto unbind_all;
 
 	sun4i_framebuffer_init(drm);
 
@@ -119,6 +119,8 @@ static int sun4i_drv_bind(struct device *dev)
 
 finish_poll:
 	drm_kms_helper_poll_fini(drm);
+unbind_all:
+	component_unbind_all(dev, NULL);
 cleanup_mode_config:
 	drm_mode_config_cleanup(drm);
 	of_reserved_mem_device_release(dev);
@@ -295,8 +295,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 	if (unlock_resv)
 		dma_resv_unlock(bo->base.resv);
 
-	ttm_bo_put(bo);
-
 	return 0;
 }
 
@@ -158,7 +158,7 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
 			struct ttm_buffer_object *bo = res->bo;
 			uint32_t num_pages;
 
-			if (!bo)
+			if (!bo || bo->resource != res)
 				continue;
 
 			num_pages = PFN_UP(bo->base.size);
@@ -604,7 +604,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
 
 	if (virtio_gpu_is_shmem(bo) && use_dma_api)
-		dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
 					    bo->base.sgt, DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -1026,7 +1026,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
 
 	if (virtio_gpu_is_shmem(bo) && use_dma_api)
-		dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
 					    bo->base.sgt, DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -358,16 +358,21 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
 	if (rc)
 		return rc;
 
-	if (pci_enable_device(dp) < 0) {
+	rc = pci_enable_device(dp);
+	if (rc < 0) {
 		dev_err(&dp->dev, "Cannot enable PCI device\n");
 		goto err_out;
 	}
 
-	if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
+	if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) {
+		rc = -ENODEV;
 		goto err_disable;
+	}
 	addr = pci_resource_start(dp, 0);
-	if (addr == 0)
+	if (addr == 0) {
+		rc = -ENODEV;
 		goto err_disable;
+	}
 
 	p = framebuffer_alloc(0, &dp->dev);
 	if (p == NULL) {
@@ -417,7 +422,8 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
 
 	init_chips(p, addr);
 
-	if (register_framebuffer(p) < 0) {
+	rc = register_framebuffer(p);
+	if (rc < 0) {
 		dev_err(&dp->dev,"C&T 65550 framebuffer failed to register\n");
 		goto err_unmap;
 	}
@@ -305,17 +305,18 @@ void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
 {
+	struct fb_deferred_io *fbdefio = info->fbdefio;
+
 	file->f_mapping->a_ops = &fb_deferred_io_aops;
+	fbdefio->open_count++;
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_open);
 
-void fb_deferred_io_release(struct fb_info *info)
+static void fb_deferred_io_lastclose(struct fb_info *info)
 {
-	struct fb_deferred_io *fbdefio = info->fbdefio;
 	struct page *page;
 	int i;
 
-	BUG_ON(!fbdefio);
 	cancel_delayed_work_sync(&info->deferred_work);
 
 	/* clear out the mapping that we setup */
@@ -324,13 +325,21 @@ void fb_deferred_io_release(struct fb_info *info)
 			page->mapping = NULL;
 	}
 }
 
+void fb_deferred_io_release(struct fb_info *info)
+{
+	struct fb_deferred_io *fbdefio = info->fbdefio;
+
+	if (!--fbdefio->open_count)
+		fb_deferred_io_lastclose(info);
+}
 EXPORT_SYMBOL_GPL(fb_deferred_io_release);
 
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
 	struct fb_deferred_io *fbdefio = info->fbdefio;
 
-	fb_deferred_io_release(info);
+	fb_deferred_io_lastclose(info);
 
 	kvfree(info->pagerefs);
 	mutex_destroy(&fbdefio->lock);
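The fbdev fix pairs every fb_deferred_io_open() with a release and only tears the deferred-I/O machinery down on the last close. A minimal standalone model of that refcount discipline, with names simplified from the patch (struct defio and its helpers are illustrative only):

/* Simplified model of the open_count discipline introduced above. */
struct defio {
	int open_count;		/* protected by the owner's lock in real code */
};

static void lastclose(struct defio *d)
{
	(void)d;		/* cancel deferred work, clear page mappings */
}

static void defio_open(struct defio *d)
{
	d->open_count++;	/* one reference per opened file */
}

static void defio_release(struct defio *d)
{
	if (!--d->open_count)	/* only the final close tears things down */
		lastclose(d);
}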
@@ -423,11 +423,11 @@ struct drm_bridge_funcs {
	 *
	 * The returned array must be allocated with kmalloc() and will be
	 * freed by the caller. If the allocation fails, NULL should be
-	 * returned. num_output_fmts must be set to the returned array size.
+	 * returned. num_input_fmts must be set to the returned array size.
	 * Formats listed in the returned array should be listed in decreasing
	 * preference order (the core will try all formats until it finds one
	 * that works). When the format is not supported NULL should be
-	 * returned and num_output_fmts should be set to 0.
+	 * returned and num_input_fmts should be set to 0.
	 *
	 * This method is called on all elements of the bridge chain as part of
	 * the bus format negotiation process that happens in
@@ -476,7 +476,9 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
 void drm_gem_lru_remove(struct drm_gem_object *obj);
 void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
-unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
+			       unsigned int nr_to_scan,
+			       unsigned long *remaining,
			       bool (*shrink)(struct drm_gem_object *obj));
 
 #endif /* __DRM_GEM_H__ */
@@ -212,6 +212,7 @@ struct fb_deferred_io {
	/* delay between mkwrite and deferred handler */
	unsigned long delay;
	bool sort_pagereflist; /* sort pagelist by offset */
+	int open_count; /* number of opened files; protected by fb_info lock */
	struct mutex lock; /* mutex that protects the pageref list */
	struct list_head pagereflist; /* list of pagerefs for touched pages */
	/* callback */