drm fixes for 6.11-rc6
ttm:
 - revert prefault change, caused stutters

aperture:
 - handle non-VGA devices better

amdgpu:
 - SWSMU gaming stability fix
 - SMU 13.0.7 fix
 - SWSMU documentation alignment fix
 - SMU 14.0.x fixes
 - GC 12.x fix
 - Display fix
 - IP discovery fix
 - SMU 13.0.6 fix

i915:
 - Fix #11195: the external display connected via a USB Type-C dock stays blank after re-connecting the dock
 - Make DSI backlight work for the 2G version of the Lenovo Yoga Tab 3 X90F
 - Move ARL GuC firmware to the correct version

xe:
 - Invalidate media_gt TLBs
 - Fix HWMON i1 power setup write command

vmwgfx:
 - prevent unmapping active read buffers
 - fix prime with external buffers
 - disable coherent dumb buffers without 3d

v3d:
 - disable preemption while updating GPU stats
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmbRKHMACgkQDHTzWXnE
hr6jeBAAko96oI4Na0pDb61mdwbN0dJVRC96xMsHUQahvcwTc4HKUjis9FZ8lluX
inXe3G4ZzH0exjZdOwWWhqKU2Oy7BaMqds/cX444rESEDcISD69EBGEvgf0SI5+u
czJD+mWDcsGUu/qVN+07ifKRtfD7munu5f4hAr2It+SRjQ98U6kSWPZlPnFpxT9/
dg8dCRk5F67S2x4OUYmzxMuavq/jK84LzpGTjWqk+EfTwb09ElXMmi0a6iYsjeJF
+HVOMLn71nVAgb4tSimKGV0gHAT8yLAv6MBnO7HrWdEgLRGKZdRIJHyVmqbf7SVQ
qPcKOV7BEBnFTzIUMTR6veZoPNBEyjXlDbmNHws2uWuMKBvZzFJXeuxnLqP0/4H8
QfWoa//aWgJl1hRBV+vgUweygv+zE3jfQNNod7AMdGEZkkTZlbZ/dmLDfvtZ+aPL
l2NV1ybBpkiocKQBfl8LlnMFnY12u/W0DGRewbFVZ1X/VduHxmZp76ioH7Ge0f9e
4sfJnWvGOd/UIG6f5vdO9VCivodpijhcXCaYQOjXZe8qLd6i6i9b9rvdiXnQ+0av
tH3z2bKapr/fLt2fYF/aWh3h4KZVFbjmQ1gbvHVKCez9/KR8Yjj5QleRDmq1kRO+
zKufDCWLpnj6RXBOvWH9MeRXQss+jXA4b58YXJ7Px6iTtrQYsss=
=IfxC
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2024-08-30' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Another week, another set of GPU fixes. amdgpu and vmwgfx leading the
  charge, then i915 and xe changes along with v3d and some other bits.
  The TTM revert is due to some stuttering graphical apps probably due
  to longer stalls while prefaulting.

  Seems pretty much where I'd expect things,

  ttm:
   - revert prefault change, caused stutters

  aperture:
   - handle non-VGA devices better

  amdgpu:
   - SWSMU gaming stability fix
   - SMU 13.0.7 fix
   - SWSMU documentation alignment fix
   - SMU 14.0.x fixes
   - GC 12.x fix
   - Display fix
   - IP discovery fix
   - SMU 13.0.6 fix

  i915:
   - Fix #11195: the external display connected via a USB Type-C dock
     stays blank after re-connecting the dock
   - Make DSI backlight work for the 2G version of the Lenovo Yoga
     Tab 3 X90F
   - Move ARL GuC firmware to the correct version

  xe:
   - Invalidate media_gt TLBs
   - Fix HWMON i1 power setup write command

  vmwgfx:
   - prevent unmapping active read buffers
   - fix prime with external buffers
   - disable coherent dumb buffers without 3d

  v3d:
   - disable preemption while updating GPU stats"

* tag 'drm-fixes-2024-08-30' of https://gitlab.freedesktop.org/drm/kernel:
  drm/xe/hwmon: Fix WRITE_I1 param from u32 to u16
  drm/v3d: Disable preemption while updating GPU stats
  drm/amd/pm: Drop unsupported features on smu v14_0_2
  drm/amd/pm: Add support for new P2S table revision
  drm/amdgpu: support for gc_info table v1.3
  drm/amd/display: avoid using null object of framebuffer
  drm/amdgpu/gfx12: set UNORD_DISPATCH in compute MQDs
  drm/amd/pm: update message interface for smu v14.0.2/3
  drm/amdgpu/swsmu: always force a state reprogram on init
  drm/amdgpu/smu13.0.7: print index for profiles
  drm/amdgpu: align pp_power_profile_mode with kernel docs
  drm/i915/dp_mst: Fix MST state after a sink reset
  drm/xe: Invalidate media_gt TLBs
  drm/i915: ARL requires a newer GSC firmware
  drm/i915/dsi: Make Lenovo Yoga Tab 3 X90F DMI match less strict
  video/aperture: optionally match the device in sysfb_disable()
  drm/vmwgfx: Disable coherent dumb buffers without 3d
  drm/vmwgfx: Fix prime with external buffers
  drm/vmwgfx: Prevent unmapping active read buffers
  Revert "drm/ttm: increase ttm pre-fault value to PMD size"
commit 20371ba120
@@ -39,6 +39,8 @@ static struct platform_device *pd;
 static DEFINE_MUTEX(disable_lock);
 static bool disabled;
 
+static struct device *sysfb_parent_dev(const struct screen_info *si);
+
 static bool sysfb_unregister(void)
 {
 	if (IS_ERR_OR_NULL(pd))
@@ -52,6 +54,7 @@ static bool sysfb_unregister(void)
 
 /**
  * sysfb_disable() - disable the Generic System Framebuffers support
+ * @dev: the device to check if non-NULL
  *
  * This disables the registration of system framebuffer devices that match the
  * generic drivers that make use of the system framebuffer set up by firmware.
@@ -61,17 +64,21 @@ static bool sysfb_unregister(void)
  * Context: The function can sleep. A @disable_lock mutex is acquired to serialize
  * against sysfb_init(), that registers a system framebuffer device.
  */
-void sysfb_disable(void)
+void sysfb_disable(struct device *dev)
 {
+	struct screen_info *si = &screen_info;
+
 	mutex_lock(&disable_lock);
-	sysfb_unregister();
-	disabled = true;
+	if (!dev || dev == sysfb_parent_dev(si)) {
+		sysfb_unregister();
+		disabled = true;
+	}
 	mutex_unlock(&disable_lock);
 }
 EXPORT_SYMBOL_GPL(sysfb_disable);
 
 #if defined(CONFIG_PCI)
-static __init bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev)
+static bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev)
 {
 	/*
 	 * TODO: Try to integrate this code into the PCI subsystem
@@ -87,13 +94,13 @@ static __init bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev)
 	return true;
 }
 #else
-static __init bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev)
+static bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev)
 {
 	return false;
 }
 #endif
 
-static __init struct device *sysfb_parent_dev(const struct screen_info *si)
+static struct device *sysfb_parent_dev(const struct screen_info *si)
 {
 	struct pci_dev *pdev;
 
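The sysfb change above is the API pivot for the aperture fix later in this diff: sysfb_disable() now takes the device claiming the framebuffer and only tears sysfb down when that device matches the firmware framebuffer's parent (NULL still means "disable unconditionally"). A standalone sketch of just that matching rule, with invented names, runnable in userspace:

#include <stdbool.h>
#include <stdio.h>

struct device { const char *name; };

static struct device vga_dev = { "0000:00:02.0" };
/* stands in for what sysfb_parent_dev() would return */
static struct device *fw_fb_parent = &vga_dev;
static bool disabled;

static void sysfb_disable_model(struct device *dev)
{
	/* mirrors: if (!dev || dev == sysfb_parent_dev(si)) { ... } */
	if (!dev || dev == fw_fb_parent)
		disabled = true;
}

int main(void)
{
	struct device other = { "0000:01:00.0" };

	sysfb_disable_model(&other);       /* non-matching device: no-op */
	printf("after non-match: %d\n", disabled); /* 0 */
	sysfb_disable_model(fw_fb_parent); /* matching device: disables */
	printf("after match: %d\n", disabled);     /* 1 */
	return 0;
}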
@@ -1500,6 +1500,7 @@ union gc_info {
 	struct gc_info_v1_0 v1;
 	struct gc_info_v1_1 v1_1;
 	struct gc_info_v1_2 v1_2;
+	struct gc_info_v1_3 v1_3;
 	struct gc_info_v2_0 v2;
 	struct gc_info_v2_1 v2_1;
 };
@@ -1558,6 +1559,16 @@ static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
 			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
 			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
 		}
+		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 3) {
+			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v1_3.gc_tcp_size_per_cu);
+			adev->gfx.config.gc_tcp_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcp_cache_line_size);
+			adev->gfx.config.gc_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_size_per_sqc);
+			adev->gfx.config.gc_instruction_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_line_size);
+			adev->gfx.config.gc_scalar_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_size_per_sqc);
+			adev->gfx.config.gc_scalar_data_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_line_size);
+			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v1_3.gc_tcc_size);
+			adev->gfx.config.gc_tcc_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcc_cache_line_size);
+		}
 		break;
 	case 2:
 		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
@@ -240,6 +240,12 @@ struct amdgpu_gfx_config {
 	uint32_t gc_tcp_size_per_cu;
 	uint32_t gc_num_cu_per_sqc;
 	uint32_t gc_tcc_size;
+	uint32_t gc_tcp_cache_line_size;
+	uint32_t gc_instruction_cache_size_per_sqc;
+	uint32_t gc_instruction_cache_line_size;
+	uint32_t gc_scalar_data_cache_size_per_sqc;
+	uint32_t gc_scalar_data_cache_line_size;
+	uint32_t gc_tcc_cache_line_size;
 };
 
 struct amdgpu_cu_info {
@@ -3005,7 +3005,7 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
 			    (order_base_2(prop->queue_size / 4) - 1));
 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
 			    (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
-	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
@@ -187,6 +187,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
 	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
 	m->cp_hqd_pq_control |=
 			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
 	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
 
 	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
@@ -28,6 +28,7 @@
 #include <drm/drm_blend.h>
 #include <drm/drm_gem_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fourcc.h>
 
 #include "amdgpu.h"
@@ -935,10 +936,14 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
 	}
 
 	afb = to_amdgpu_framebuffer(new_state->fb);
-	obj = new_state->fb->obj[0];
+	obj = drm_gem_fb_get_obj(new_state->fb, 0);
+	if (!obj) {
+		DRM_ERROR("Failed to get obj from framebuffer\n");
+		return -EINVAL;
+	}
 
 	rbo = gem_to_amdgpu_bo(obj);
 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
 
 	r = amdgpu_bo_reserve(rbo, true);
 	if (r) {
 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
@@ -258,6 +258,48 @@ struct gc_info_v1_2 {
 	uint32_t gc_gl2c_per_gpu;
 };
 
+struct gc_info_v1_3 {
+	struct gpu_info_header header;
+	uint32_t gc_num_se;
+	uint32_t gc_num_wgp0_per_sa;
+	uint32_t gc_num_wgp1_per_sa;
+	uint32_t gc_num_rb_per_se;
+	uint32_t gc_num_gl2c;
+	uint32_t gc_num_gprs;
+	uint32_t gc_num_max_gs_thds;
+	uint32_t gc_gs_table_depth;
+	uint32_t gc_gsprim_buff_depth;
+	uint32_t gc_parameter_cache_depth;
+	uint32_t gc_double_offchip_lds_buffer;
+	uint32_t gc_wave_size;
+	uint32_t gc_max_waves_per_simd;
+	uint32_t gc_max_scratch_slots_per_cu;
+	uint32_t gc_lds_size;
+	uint32_t gc_num_sc_per_se;
+	uint32_t gc_num_sa_per_se;
+	uint32_t gc_num_packer_per_sc;
+	uint32_t gc_num_gl2a;
+	uint32_t gc_num_tcp_per_sa;
+	uint32_t gc_num_sdp_interface;
+	uint32_t gc_num_tcps;
+	uint32_t gc_num_tcp_per_wpg;
+	uint32_t gc_tcp_l1_size;
+	uint32_t gc_num_sqc_per_wgp;
+	uint32_t gc_l1_instruction_cache_size_per_sqc;
+	uint32_t gc_l1_data_cache_size_per_sqc;
+	uint32_t gc_gl1c_per_sa;
+	uint32_t gc_gl1c_size_per_instance;
+	uint32_t gc_gl2c_per_gpu;
+	uint32_t gc_tcp_size_per_cu;
+	uint32_t gc_tcp_cache_line_size;
+	uint32_t gc_instruction_cache_size_per_sqc;
+	uint32_t gc_instruction_cache_line_size;
+	uint32_t gc_scalar_data_cache_size_per_sqc;
+	uint32_t gc_scalar_data_cache_line_size;
+	uint32_t gc_tcc_size;
+	uint32_t gc_tcc_cache_line_size;
+};
+
 struct gc_info_v2_0 {
 	struct gpu_info_header header;
 
@@ -2224,8 +2224,9 @@ static int smu_bump_power_profile_mode(struct smu_context *smu,
 }
 
 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
-					  enum amd_dpm_forced_level level,
-					  bool skip_display_settings)
+					  enum amd_dpm_forced_level level,
+					  bool skip_display_settings,
+					  bool force_update)
 {
 	int ret = 0;
 	int index = 0;
@@ -2254,7 +2255,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
 		}
 	}
 
-	if (smu_dpm_ctx->dpm_level != level) {
+	if (force_update || smu_dpm_ctx->dpm_level != level) {
 		ret = smu_asic_set_performance_level(smu, level);
 		if (ret) {
 			dev_err(smu->adev->dev, "Failed to set performance level!");
@@ -2265,13 +2266,12 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
 		smu_dpm_ctx->dpm_level = level;
 	}
 
-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
-	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
 		index = fls(smu->workload_mask);
 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
 		workload[0] = smu->workload_setting[index];
 
-		if (smu->power_profile_mode != workload[0])
+		if (force_update || smu->power_profile_mode != workload[0])
 			smu_bump_power_profile_mode(smu, workload, 0);
 	}
 
@@ -2292,11 +2292,13 @@ static int smu_handle_task(struct smu_context *smu,
 		ret = smu_pre_display_config_changed(smu);
 		if (ret)
 			return ret;
-		ret = smu_adjust_power_state_dynamic(smu, level, false);
+		ret = smu_adjust_power_state_dynamic(smu, level, false, false);
 		break;
+	case AMD_PP_TASK_COMPLETE_INIT:
+		ret = smu_adjust_power_state_dynamic(smu, level, true, true);
+		break;
 	case AMD_PP_TASK_READJUST_POWER_STATE:
-		ret = smu_adjust_power_state_dynamic(smu, level, true);
+		ret = smu_adjust_power_state_dynamic(smu, level, true, false);
 		break;
 	default:
 		break;
@@ -2343,8 +2345,7 @@ static int smu_switch_power_profile(void *handle,
 		workload[0] = smu->workload_setting[index];
 	}
 
-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
-	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
 		smu_bump_power_profile_mode(smu, workload, 0);
 
 	return 0;
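The force_update plumbing above addresses the swsmu init case: after a reset the firmware loses its programmed state while the driver's cached dpm_level and power_profile_mode still compare equal, so the reprogram used to be skipped. A toy userspace model of that cache-check pitfall (all names invented):

#include <stdbool.h>
#include <stdio.h>

static int hw_level = -1;    /* what the (reset) firmware actually has */
static int cached_level = 2; /* what the driver believes it programmed */

static void apply_level(int level, bool force_update)
{
	/* mirrors: if (force_update || smu_dpm_ctx->dpm_level != level) */
	if (force_update || cached_level != level) {
		hw_level = level;
		cached_level = level;
	}
}

int main(void)
{
	apply_level(2, false); /* cache says 2 == 2: skipped, hw stays stale */
	printf("without force: hw_level=%d\n", hw_level); /* -1 */
	apply_level(2, true);  /* AMD_PP_TASK_COMPLETE_INIT path: forced */
	printf("with force:    hw_level=%d\n", hw_level); /* 2 */
	return 0;
}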
@@ -92,7 +92,6 @@
 
 //Resets
 #define PPSMC_MSG_PrepareMp1ForUnload           0x2E
-#define PPSMC_MSG_Mode1Reset                    0x2F
 
 //Set SystemVirtual DramAddrHigh
 #define PPSMC_MSG_SetSystemVirtualDramAddrHigh  0x30
@@ -119,11 +118,12 @@
 
 //STB to dram log
 #define PPSMC_MSG_DumpSTBtoDram                 0x3D
-#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh   0x3E
-#define PPSMC_MSG_STBtoDramLogSetDramAddrLow    0x3F
+#define PPSMC_MSG_STBtoDramLogSetDramAddress    0x3E
+#define PPSMC_MSG_DummyUndefined                0x3F
 #define PPSMC_MSG_STBtoDramLogSetDramSize       0x40
+#define PPSMC_MSG_SetOBMTraceBufferLogging      0x41
 
 #define PPSMC_MSG_UseProfilingMode              0x42
 #define PPSMC_MSG_AllowGfxDcs                   0x43
 #define PPSMC_MSG_DisallowGfxDcs                0x44
 #define PPSMC_MSG_EnableAudioStutterWA          0x45
@@ -135,6 +135,16 @@
 #define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4B
 #define PPSMC_MSG_SetPriorityDeltaGain          0x4C
 #define PPSMC_MSG_AllowIHHostInterrupt          0x4D
+#define PPSMC_MSG_EnableShadowDpm               0x4E
 #define PPSMC_MSG_Mode3Reset                    0x4F
-#define PPSMC_Message_Count                     0x50
+#define PPSMC_MSG_SetDriverDramAddr             0x50
+#define PPSMC_MSG_SetToolsDramAddr              0x51
+#define PPSMC_MSG_TransferTableSmu2DramWithAddr 0x52
+#define PPSMC_MSG_TransferTableDram2SmuWithAddr 0x53
+#define PPSMC_MSG_GetAllRunningSmuFeatures      0x54
+#define PPSMC_MSG_GetSvi3Voltage                0x55
+#define PPSMC_MSG_UpdatePolicy                  0x56
+#define PPSMC_MSG_ExtPwrConnSupport             0x57
+#define PPSMC_MSG_PreloadSwPstateForUclkOverDrive 0x58
+#define PPSMC_Message_Count                     0x59
 #endif
@@ -121,6 +121,7 @@ struct mca_ras_info {
 
 #define P2S_TABLE_ID_A 0x50325341
 #define P2S_TABLE_ID_X 0x50325358
+#define P2S_TABLE_ID_3 0x50325303
 
 // clang-format off
 static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
@@ -271,14 +272,18 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu)
 	struct amdgpu_device *adev = smu->adev;
 	uint32_t p2s_table_id = P2S_TABLE_ID_A;
 	int ret = 0, i, p2stable_count;
+	int var = (adev->pdev->device & 0xF);
 	char ucode_prefix[15];
 
 	/* No need to load P2S tables in IOV mode */
 	if (amdgpu_sriov_vf(adev))
 		return 0;
 
-	if (!(adev->flags & AMD_IS_APU))
+	if (!(adev->flags & AMD_IS_APU)) {
 		p2s_table_id = P2S_TABLE_ID_X;
+		if (var == 0x5)
+			p2s_table_id = P2S_TABLE_ID_3;
+	}
 
 	amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix,
 				       sizeof(ucode_prefix));
@@ -2378,7 +2378,7 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf
 
 	size += sysfs_emit_at(buf, size, " ");
 	for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++)
-		size += sysfs_emit_at(buf, size, "%-14s%s", amdgpu_pp_profile_name[i],
+		size += sysfs_emit_at(buf, size, "%d %-14s%s", i, amdgpu_pp_profile_name[i],
			(i == smu->power_profile_mode) ? "* " : " ");
 
 	size += sysfs_emit_at(buf, size, "\n");
@@ -2408,7 +2408,7 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf
 	do { \
 		size += sysfs_emit_at(buf, size, "%-30s", #field); \
 		for (j = 0; j <= PP_SMC_POWER_PROFILE_WINDOW3D; j++) \
-			size += sysfs_emit_at(buf, size, "%-16d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \
+			size += sysfs_emit_at(buf, size, "%-18d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \
 		size += sysfs_emit_at(buf, size, "\n"); \
 	} while (0)
@@ -115,7 +115,6 @@ static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] =
 	MSG_MAP(SetMGpuFanBoostLimitRpm,	PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
 	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit, 0),
 	MSG_MAP(NotifyPowerSource,		PPSMC_MSG_NotifyPowerSource, 0),
-	MSG_MAP(Mode1Reset,			PPSMC_MSG_Mode1Reset, 0),
 	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload, 0),
 	MSG_MAP(DFCstateControl,		PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
 	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3, 0),
@@ -1824,50 +1823,6 @@ static void smu_v14_0_2_set_smu_mailbox_registers(struct smu_context *smu)
 	smu->debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_54);
 }
 
-static int smu_v14_0_2_smu_send_bad_mem_page_num(struct smu_context *smu,
-		uint32_t size)
-{
-	int ret = 0;
-
-	/* message SMU to update the bad page number on SMUBUS */
-	ret = smu_cmn_send_smc_msg_with_param(smu,
-					      SMU_MSG_SetNumBadMemoryPagesRetired,
-					      size, NULL);
-	if (ret)
-		dev_err(smu->adev->dev,
-			"[%s] failed to message SMU to update bad memory pages number\n",
-			__func__);
-
-	return ret;
-}
-
-static int smu_v14_0_2_send_bad_mem_channel_flag(struct smu_context *smu,
-		uint32_t size)
-{
-	int ret = 0;
-
-	/* message SMU to update the bad channel info on SMUBUS */
-	ret = smu_cmn_send_smc_msg_with_param(smu,
-					      SMU_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,
-					      size, NULL);
-	if (ret)
-		dev_err(smu->adev->dev,
-			"[%s] failed to message SMU to update bad memory pages channel info\n",
-			__func__);
-
-	return ret;
-}
-
-static ssize_t smu_v14_0_2_get_ecc_info(struct smu_context *smu,
-					void *table)
-{
-	int ret = 0;
-
-	// TODO
-
-	return ret;
-}
-
 static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
 					   void **table)
 {
@@ -2015,12 +1970,9 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
 	.enable_gfx_features = smu_v14_0_2_enable_gfx_features,
 	.set_mp1_state = smu_v14_0_2_set_mp1_state,
 	.set_df_cstate = smu_v14_0_2_set_df_cstate,
-	.send_hbm_bad_pages_num = smu_v14_0_2_smu_send_bad_mem_page_num,
-	.send_hbm_bad_channel_flag = smu_v14_0_2_send_bad_mem_channel_flag,
 #if 0
 	.gpo_control = smu_v14_0_gpo_control,
 #endif
-	.get_ecc_info = smu_v14_0_2_get_ecc_info,
 };
 
 void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu)
@@ -5935,6 +5935,18 @@ intel_dp_detect(struct drm_connector *connector,
 	else
 		status = connector_status_disconnected;
 
+	if (status != connector_status_disconnected &&
+	    !intel_dp_mst_verify_dpcd_state(intel_dp))
+		/*
+		 * This requires retrying detection for instance to re-enable
+		 * the MST mode that got reset via a long HPD pulse. The retry
+		 * will happen either via the hotplug handler's retry logic,
+		 * ensured by setting the connector here to SST/disconnected,
+		 * or via a userspace connector probing in response to the
+		 * hotplug uevent sent when removing the MST connectors.
+		 */
+		status = connector_status_disconnected;
+
 	if (status == connector_status_disconnected) {
 		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
 		memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
@@ -1998,3 +1998,43 @@ bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
 
 	return false;
 }
+
+/*
+ * intel_dp_mst_verify_dpcd_state - verify the MST SW enabled state wrt. the DPCD
+ * @intel_dp: DP port object
+ *
+ * Verify if @intel_dp's MST enabled SW state matches the corresponding DPCD
+ * state. A long HPD pulse - not long enough to be detected as a disconnected
+ * state - could've reset the DPCD state, which requires tearing
+ * down/recreating the MST topology.
+ *
+ * Returns %true if the SW MST enabled and DPCD states match, %false
+ * otherwise.
+ */
+bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp)
+{
+	struct intel_display *display = to_intel_display(intel_dp);
+	struct intel_connector *connector = intel_dp->attached_connector;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *encoder = &dig_port->base;
+	int ret;
+	u8 val;
+
+	if (!intel_dp->is_mst)
+		return true;
+
+	ret = drm_dp_dpcd_readb(intel_dp->mst_mgr.aux, DP_MSTM_CTRL, &val);
+
+	/* Adjust the expected register value for SST + SideBand. */
+	if (ret < 0 || val != (DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC)) {
+		drm_dbg_kms(display->drm,
+			    "[CONNECTOR:%d:%s][ENCODER:%d:%s] MST mode got reset, removing topology (ret=%d, ctrl=0x%02x)\n",
+			    connector->base.base.id, connector->base.name,
+			    encoder->base.base.id, encoder->base.name,
+			    ret, val);
+
+		return false;
+	}
+
+	return true;
+}
@@ -27,5 +27,6 @@ int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
 				   struct intel_link_bw_limits *limits);
 bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
 				     struct intel_crtc *crtc);
+bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp);
 
 #endif /* __INTEL_DP_MST_H__ */
@@ -1870,7 +1870,6 @@ static const struct dmi_system_id vlv_dsi_dmi_quirk_table[] = {
 		/* Lenovo Yoga Tab 3 Pro YT3-X90F */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
 		},
 		.driver_data = (void *)vlv_dsi_lenovo_yoga_tab3_backlight_fixup,
@@ -212,6 +212,37 @@ int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, size_t size)
 		}
 	}
 
+	if (IS_ARROWLAKE(gt->i915)) {
+		bool too_old = false;
+
+		/*
+		 * ARL requires a newer firmware than MTL did (102.0.10.1878) but the
+		 * firmware is actually common. So, need to do an explicit version check
+		 * here rather than using a separate table entry. And if the older
+		 * MTL-only version is found, then just don't use GSC rather than aborting
+		 * the driver load.
+		 */
+		if (gsc->release.major < 102) {
+			too_old = true;
+		} else if (gsc->release.major == 102) {
+			if (gsc->release.minor == 0) {
+				if (gsc->release.patch < 10) {
+					too_old = true;
+				} else if (gsc->release.patch == 10) {
+					if (gsc->release.build < 1878)
+						too_old = true;
+				}
+			}
+		}
+
+		if (too_old) {
+			gt_info(gt, "GSC firmware too old for ARL, got %d.%d.%d.%d but need at least 102.0.10.1878",
+				gsc->release.major, gsc->release.minor,
+				gsc->release.patch, gsc->release.build);
+			return -EINVAL;
+		}
+	}
+
 	return 0;
 }
 
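One way to sanity-check the nested major/minor/patch/build comparison above is to note that it is a lexicographic compare of the four release fields; this standalone sketch packs them into one integer to test a few boundary versions (this is not how i915 implements it):

#include <stdint.h>
#include <stdio.h>

/* Pack 4x16-bit version fields so integer "<" is lexicographic order. */
static uint64_t pack(uint16_t major, uint16_t minor, uint16_t patch, uint16_t build)
{
	return ((uint64_t)major << 48) | ((uint64_t)minor << 32) |
	       ((uint64_t)patch << 16) | build;
}

int main(void)
{
	const uint64_t arl_min = pack(102, 0, 10, 1878); /* 102.0.10.1878 */

	printf("%d\n", pack(101, 5, 0, 0)     < arl_min); /* 1: too old */
	printf("%d\n", pack(102, 0, 10, 1877) < arl_min); /* 1: too old */
	printf("%d\n", pack(102, 0, 10, 1878) < arl_min); /* 0: ok */
	printf("%d\n", pack(102, 1, 0, 0)     < arl_min); /* 0: ok */
	return 0;
}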
@@ -698,12 +698,18 @@ static int check_gsc_manifest(struct intel_gt *gt,
 			      const struct firmware *fw,
 			      struct intel_uc_fw *uc_fw)
 {
+	int ret;
+
 	switch (uc_fw->type) {
 	case INTEL_UC_FW_TYPE_HUC:
-		intel_huc_fw_get_binary_info(uc_fw, fw->data, fw->size);
+		ret = intel_huc_fw_get_binary_info(uc_fw, fw->data, fw->size);
+		if (ret)
+			return ret;
 		break;
 	case INTEL_UC_FW_TYPE_GSC:
-		intel_gsc_fw_get_binary_info(uc_fw, fw->data, fw->size);
+		ret = intel_gsc_fw_get_binary_info(uc_fw, fw->data, fw->size);
+		if (ret)
+			return ret;
 		break;
 	default:
 		MISSING_CASE(uc_fw->type);
@@ -546,6 +546,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define IS_LUNARLAKE(i915) (0 && i915)
 #define IS_BATTLEMAGE(i915) (0 && i915)
 
+#define IS_ARROWLAKE(i915) \
+	IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL)
 #define IS_DG2_G10(i915) \
 	IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G10)
 #define IS_DG2_G11(i915) \
@@ -203,6 +203,10 @@ static const u16 subplatform_g12_ids[] = {
 	INTEL_DG2_G12_IDS(ID),
 };
 
+static const u16 subplatform_arl_ids[] = {
+	INTEL_ARL_IDS(ID),
+};
+
 static bool find_devid(u16 id, const u16 *p, unsigned int num)
 {
 	for (; num; num--, p++) {
@@ -260,6 +264,9 @@ static void intel_device_info_subplatform_init(struct drm_i915_private *i915)
 		} else if (find_devid(devid, subplatform_g12_ids,
 				      ARRAY_SIZE(subplatform_g12_ids))) {
 			mask = BIT(INTEL_SUBPLATFORM_G12);
+		} else if (find_devid(devid, subplatform_arl_ids,
+				      ARRAY_SIZE(subplatform_arl_ids))) {
+			mask = BIT(INTEL_SUBPLATFORM_ARL);
 		}
 
 		GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_MASK);
@@ -127,6 +127,9 @@ enum intel_platform {
 #define INTEL_SUBPLATFORM_N	1
 #define INTEL_SUBPLATFORM_RPLU	2
 
+/* MTL */
+#define INTEL_SUBPLATFORM_ARL	0
+
 enum intel_ppgtt_type {
 	INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
 	INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
@@ -134,6 +134,8 @@ v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
 	struct v3d_stats *local_stats = &file->stats[queue];
 	u64 now = local_clock();
 
+	preempt_disable();
+
 	write_seqcount_begin(&local_stats->lock);
 	local_stats->start_ns = now;
 	write_seqcount_end(&local_stats->lock);
@@ -141,6 +143,8 @@ v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
 	write_seqcount_begin(&global_stats->lock);
 	global_stats->start_ns = now;
 	write_seqcount_end(&global_stats->lock);
+
+	preempt_enable();
 }
 
 static void
@@ -162,8 +166,10 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
 	struct v3d_stats *local_stats = &file->stats[queue];
 	u64 now = local_clock();
 
+	preempt_disable();
 	v3d_stats_update(local_stats, now);
 	v3d_stats_update(global_stats, now);
+	preempt_enable();
 }
 
 static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
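For context on the preempt_disable()/preempt_enable() pairs above: write_seqcount_begin() leaves the sequence odd until write_seqcount_end(), and readers spin while it is odd, so a writer preempted mid-update can stall readers. A simplified userspace sketch of that seqlock protocol (illustrative only; real seqlocks need stronger fences):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint seq;
static uint64_t start_ns; /* the stat being published */

static void write_stat(uint64_t now)
{
	/* odd sequence: write in progress; preemption here stalls readers */
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);
	start_ns = now;
	/* even again: data stable */
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);
}

static uint64_t read_stat(void)
{
	unsigned int s1, s2;
	uint64_t v;

	do {
		s1 = atomic_load_explicit(&seq, memory_order_acquire);
		v = start_ns;
		s2 = atomic_load_explicit(&seq, memory_order_acquire);
	} while (s1 != s2 || (s1 & 1)); /* retry while a writer is active */
	return v;
}

int main(void)
{
	write_stat(123);
	printf("%llu\n", (unsigned long long)read_stat());
	return 0;
}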
@@ -27,6 +27,8 @@
  **************************************************************************/
 
 #include "vmwgfx_drv.h"
+
+#include "vmwgfx_bo.h"
 #include <linux/highmem.h>
 
 /*
@@ -420,13 +422,105 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
 	return 0;
 }
 
+static void *map_external(struct vmw_bo *bo, struct iosys_map *map)
+{
+	struct vmw_private *vmw =
+		container_of(bo->tbo.bdev, struct vmw_private, bdev);
+	void *ptr = NULL;
+	int ret;
+
+	if (bo->tbo.base.import_attach) {
+		ret = dma_buf_vmap(bo->tbo.base.dma_buf, map);
+		if (ret) {
+			drm_dbg_driver(&vmw->drm,
+				       "Wasn't able to map external bo!\n");
+			goto out;
+		}
+		ptr = map->vaddr;
+	} else {
+		ptr = vmw_bo_map_and_cache(bo);
+	}
+
+out:
+	return ptr;
+}
+
+static void unmap_external(struct vmw_bo *bo, struct iosys_map *map)
+{
+	if (bo->tbo.base.import_attach)
+		dma_buf_vunmap(bo->tbo.base.dma_buf, map);
+	else
+		vmw_bo_unmap(bo);
+}
+
+static int vmw_external_bo_copy(struct vmw_bo *dst, u32 dst_offset,
+				u32 dst_stride, struct vmw_bo *src,
+				u32 src_offset, u32 src_stride,
+				u32 width_in_bytes, u32 height,
+				struct vmw_diff_cpy *diff)
+{
+	struct vmw_private *vmw =
+		container_of(dst->tbo.bdev, struct vmw_private, bdev);
+	size_t dst_size = dst->tbo.resource->size;
+	size_t src_size = src->tbo.resource->size;
+	struct iosys_map dst_map = {0};
+	struct iosys_map src_map = {0};
+	int ret, i;
+	int x_in_bytes;
+	u8 *vsrc;
+	u8 *vdst;
+
+	vsrc = map_external(src, &src_map);
+	if (!vsrc) {
+		drm_dbg_driver(&vmw->drm, "Wasn't able to map src\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	vdst = map_external(dst, &dst_map);
+	if (!vdst) {
+		drm_dbg_driver(&vmw->drm, "Wasn't able to map dst\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	vsrc += src_offset;
+	vdst += dst_offset;
+	if (src_stride == dst_stride) {
+		dst_size -= dst_offset;
+		src_size -= src_offset;
+		memcpy(vdst, vsrc,
+		       min(dst_stride * height, min(dst_size, src_size)));
+	} else {
+		WARN_ON(dst_stride < width_in_bytes);
+		for (i = 0; i < height; ++i) {
+			memcpy(vdst, vsrc, width_in_bytes);
+			vsrc += src_stride;
+			vdst += dst_stride;
+		}
+	}
+
+	x_in_bytes = (dst_offset % dst_stride);
+	diff->rect.x1 = x_in_bytes / diff->cpp;
+	diff->rect.y1 = ((dst_offset - x_in_bytes) / dst_stride);
+	diff->rect.x2 = diff->rect.x1 + width_in_bytes / diff->cpp;
+	diff->rect.y2 = diff->rect.y1 + height;
+
+	ret = 0;
+out:
+	unmap_external(src, &src_map);
+	unmap_external(dst, &dst_map);
+
+	return ret;
+}
+
 /**
  * vmw_bo_cpu_blit - in-kernel cpu blit.
  *
- * @dst: Destination buffer object.
+ * @vmw_dst: Destination buffer object.
  * @dst_offset: Destination offset of blit start in bytes.
  * @dst_stride: Destination stride in bytes.
- * @src: Source buffer object.
+ * @vmw_src: Source buffer object.
  * @src_offset: Source offset of blit start in bytes.
 * @src_stride: Source stride in bytes.
 * @w: Width of blit.
@@ -444,13 +538,15 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
 * Neither of the buffer objects may be placed in PCI memory
 * (Fixed memory in TTM terminology) when using this function.
 */
-int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
+int vmw_bo_cpu_blit(struct vmw_bo *vmw_dst,
		    u32 dst_offset, u32 dst_stride,
-		    struct ttm_buffer_object *src,
+		    struct vmw_bo *vmw_src,
		    u32 src_offset, u32 src_stride,
		    u32 w, u32 h,
		    struct vmw_diff_cpy *diff)
 {
+	struct ttm_buffer_object *src = &vmw_src->tbo;
+	struct ttm_buffer_object *dst = &vmw_dst->tbo;
 	struct ttm_operation_ctx ctx = {
 		.interruptible = false,
 		.no_wait_gpu = false
@@ -460,6 +556,11 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 	int ret = 0;
 	struct page **dst_pages = NULL;
 	struct page **src_pages = NULL;
+	bool src_external = (src->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+	bool dst_external = (dst->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+
+	if (WARN_ON(dst == src))
+		return -EINVAL;
 
 	/* Buffer objects need to be either pinned or reserved: */
 	if (!(dst->pin_count))
@@ -479,6 +580,11 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 			return ret;
 	}
 
+	if (src_external || dst_external)
+		return vmw_external_bo_copy(vmw_dst, dst_offset, dst_stride,
+					    vmw_src, src_offset, src_stride,
+					    w, h, diff);
+
 	if (!src->ttm->pages && src->ttm->sg) {
 		src_pages = kvmalloc_array(src->ttm->num_pages,
					   sizeof(struct page *), GFP_KERNEL);
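The stride-mismatch branch of vmw_external_bo_copy() above reduces to a row-by-row rectangle copy; here is that core loop as a standalone, testable sketch (names invented):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy a width_in_bytes x height rectangle between buffers whose rows
 * use different strides (bytes per row). */
static void blit_rows(uint8_t *dst, uint32_t dst_stride,
		      const uint8_t *src, uint32_t src_stride,
		      uint32_t width_in_bytes, uint32_t height)
{
	for (uint32_t i = 0; i < height; ++i) {
		memcpy(dst, src, width_in_bytes);
		src += src_stride;
		dst += dst_stride;
	}
}

int main(void)
{
	uint8_t src[4 * 8], dst[4 * 16] = {0};

	memset(src, 0xab, sizeof(src));
	/* 8-byte-wide, 4-row rectangle from a tightly packed buffer
	 * into one padded to 16 bytes per row. */
	blit_rows(dst, 16, src, 8, 8, 4);
	printf("%02x %02x\n", dst[0], dst[8]); /* ab 00: padding untouched */
	return 0;
}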
@@ -360,6 +360,8 @@ void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size)
 	void *virtual;
 	int ret;
 
+	atomic_inc(&vbo->map_count);
+
 	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
 	if (virtual)
 		return virtual;
@@ -383,11 +385,17 @@ void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size)
  */
 void vmw_bo_unmap(struct vmw_bo *vbo)
 {
+	int map_count;
+
 	if (vbo->map.bo == NULL)
 		return;
 
-	ttm_bo_kunmap(&vbo->map);
-	vbo->map.bo = NULL;
+	map_count = atomic_dec_return(&vbo->map_count);
+
+	if (!map_count) {
+		ttm_bo_kunmap(&vbo->map);
+		vbo->map.bo = NULL;
+	}
 }
 
 
@@ -421,6 +429,7 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
 	vmw_bo->tbo.priority = 3;
 	vmw_bo->res_tree = RB_ROOT;
 	xa_init(&vmw_bo->detached_resources);
+	atomic_set(&vmw_bo->map_count, 0);
 
 	params->size = ALIGN(params->size, PAGE_SIZE);
 	drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);
@@ -71,6 +71,8 @@ struct vmw_bo_params {
 * @map: Kmap object for semi-persistent mappings
 * @res_tree: RB tree of resources using this buffer object as a backing MOB
 * @res_prios: Eviction priority counts for attached resources
+ * @map_count: The number of currently active maps. Will differ from the
+ * cpu_writers because it includes kernel maps.
 * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
 * increased. May be decreased without reservation.
 * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
@@ -90,6 +92,7 @@ struct vmw_bo {
	u32 res_prios[TTM_MAX_BO_PRIORITY];
	struct xarray detached_resources;
 
+	atomic_t map_count;
	atomic_t cpu_writers;
	/* Not ref-counted. Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
@@ -1353,9 +1353,9 @@ void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
 
 void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);
 
-int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
+int vmw_bo_cpu_blit(struct vmw_bo *dst,
		    u32 dst_offset, u32 dst_stride,
-		    struct ttm_buffer_object *src,
+		    struct vmw_bo *src,
		    u32 src_offset, u32 src_stride,
		    u32 w, u32 h,
		    struct vmw_diff_cpy *diff);
@@ -502,7 +502,7 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
		container_of(dirty->unit, typeof(*stdu), base);
	s32 width, height;
	s32 src_pitch, dst_pitch;
-	struct ttm_buffer_object *src_bo, *dst_bo;
+	struct vmw_bo *src_bo, *dst_bo;
	u32 src_offset, dst_offset;
	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(stdu->cpp);
 
@@ -517,11 +517,11 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
 
	/* Assume we are blitting from Guest (bo) to Host (display_srf) */
	src_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
-	src_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
+	src_bo = stdu->display_srf->res.guest_memory_bo;
	src_offset = ddirty->top * src_pitch + ddirty->left * stdu->cpp;
 
	dst_pitch = ddirty->pitch;
-	dst_bo = &ddirty->buf->tbo;
+	dst_bo = ddirty->buf;
	dst_offset = ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
 
	(void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch,
@@ -1170,7 +1170,7 @@ vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd,
	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(0);
	struct vmw_stdu_update_gb_image *cmd_img = cmd;
	struct vmw_stdu_update *cmd_update;
-	struct ttm_buffer_object *src_bo, *dst_bo;
+	struct vmw_bo *src_bo, *dst_bo;
	u32 src_offset, dst_offset;
	s32 src_pitch, dst_pitch;
	s32 width, height;
@@ -1184,11 +1184,11 @@ vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd,
 
	diff.cpp = stdu->cpp;
 
-	dst_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
+	dst_bo = stdu->display_srf->res.guest_memory_bo;
	dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
	dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
 
-	src_bo = &vfbbo->buffer->tbo;
+	src_bo = vfbbo->buffer;
	src_pitch = update->vfb->base.pitches[0];
	src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left *
		     stdu->cpp;
@@ -2283,9 +2283,11 @@ int vmw_dumb_create(struct drm_file *file_priv,
	/*
	 * Without mob support we're just going to use raw memory buffer
	 * because we wouldn't be able to support full surface coherency
-	 * without mobs
+	 * without mobs. There also no reason to support surface coherency
+	 * without 3d (i.e. gpu usage on the host) because then all the
+	 * contents is going to be rendered guest side.
	 */
-	if (!dev_priv->has_mob) {
+	if (!dev_priv->has_mob || !vmw_supports_3d(dev_priv)) {
		int cpp = DIV_ROUND_UP(args->bpp, 8);
 
		switch (cpp) {
@@ -450,7 +450,7 @@ static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
 {
	return xe_pcode_write(gt, PCODE_MBOX(PCODE_POWER_SETUP,
			      POWER_SETUP_SUBCOMMAND_WRITE_I1, 0),
-			      uval);
+			      (uval & POWER_SETUP_I1_DATA_MASK));
 }
 
 static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
@@ -3341,9 +3341,10 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 {
	struct xe_device *xe = xe_vma_vm(vma)->xe;
	struct xe_tile *tile;
-	struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE];
-	u32 tile_needs_invalidate = 0;
+	struct xe_gt_tlb_invalidation_fence
+		fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
	u8 id;
+	u32 fence_id = 0;
	int ret = 0;
 
	xe_assert(xe, !xe_vma_is_null(vma));
@@ -3371,27 +3372,37 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
		if (xe_pt_zap_ptes(tile, vma)) {
			xe_device_wmb(xe);
			xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
-							  &fence[id], true);
+							  &fence[fence_id],
+							  true);
 
-			/*
-			 * FIXME: We potentially need to invalidate multiple
-			 * GTs within the tile
-			 */
			ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
-							 &fence[id], vma);
+							 &fence[fence_id], vma);
			if (ret < 0) {
-				xe_gt_tlb_invalidation_fence_fini(&fence[id]);
+				xe_gt_tlb_invalidation_fence_fini(&fence[fence_id]);
				goto wait;
			}
+			++fence_id;
 
-			tile_needs_invalidate |= BIT(id);
+			if (!tile->media_gt)
+				continue;
+
+			xe_gt_tlb_invalidation_fence_init(tile->media_gt,
+							  &fence[fence_id],
+							  true);
+
+			ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
+							 &fence[fence_id], vma);
+			if (ret < 0) {
+				xe_gt_tlb_invalidation_fence_fini(&fence[fence_id]);
+				goto wait;
+			}
+			++fence_id;
		}
	}
 
 wait:
-	for_each_tile(tile, xe, id)
-		if (tile_needs_invalidate & BIT(id))
-			xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+	for (id = 0; id < fence_id; ++id)
+		xe_gt_tlb_invalidation_fence_wait(&fence[id]);
 
	vma->tile_invalidated = vma->tile_mask;
 
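The xe change above replaces per-tile fence slots indexed by tile id with a compact array sized for one fence per GT (primary and media) and a running fence_id; a generic issue-then-wait sketch of that pattern (not the xe API):

#include <stdio.h>

#define MAX_TILES 2
#define MAX_GT_PER_TILE 2

struct fence { int issued; };

static void fence_wait(struct fence *f)
{
	(void)f; /* would block until the invalidation completes */
}

int main(void)
{
	struct fence fence[MAX_TILES * MAX_GT_PER_TILE];
	unsigned int fence_id = 0;

	for (int tile = 0; tile < MAX_TILES; tile++) {
		/* primary GT always gets an invalidation... */
		fence[fence_id++].issued = 1;
		/* ...and the media GT too, when the tile has one
		 * (here: pretend only tile 0 has a media GT). */
		if (tile == 0)
			fence[fence_id++].issued = 1;
	}

	/* Wait only on fences actually issued, not on per-tile bits. */
	for (unsigned int id = 0; id < fence_id; ++id)
		fence_wait(&fence[id]);

	printf("waited on %u fences\n", fence_id);
	return 0;
}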
@@ -592,7 +592,7 @@ static int __init of_platform_default_populate_init(void)
		 * This can happen for example on DT systems that do EFI
		 * booting and may provide a GOP handle to the EFI stub.
		 */
-		sysfb_disable();
+		sysfb_disable(NULL);
		of_platform_device_create(node, NULL, NULL);
		of_node_put(node);
	}
@@ -293,7 +293,7 @@ int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
	 * ask for this, so let's assume that a real driver for the display
	 * was already probed and prevent sysfb to register devices later.
	 */
-	sysfb_disable();
+	sysfb_disable(NULL);
 
	aperture_detach_devices(base, size);
 
@@ -346,15 +346,10 @@ EXPORT_SYMBOL(__aperture_remove_legacy_vga_devices);
 */
 int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name)
 {
-	bool primary = false;
	resource_size_t base, size;
	int bar, ret = 0;
 
-	if (pdev == vga_default_device())
-		primary = true;
-
-	if (primary)
-		sysfb_disable();
+	sysfb_disable(&pdev->dev);
 
	for (bar = 0; bar < PCI_STD_NUM_BARS; ++bar) {
		if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
@@ -370,7 +365,7 @@ int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name)
	 * that consumes the VGA framebuffer I/O range. Remove this
	 * device as well.
	 */
-	if (primary)
+	if (pdev == vga_default_device())
		ret = __aperture_remove_legacy_vga_devices(pdev);
 
	return ret;
@@ -772,15 +772,18 @@
	INTEL_ATS_M75_IDS(MACRO__, ## __VA_ARGS__)
 
 /* MTL */
-#define INTEL_MTL_IDS(MACRO__, ...) \
-	MACRO__(0x7D40, ## __VA_ARGS__), \
+#define INTEL_ARL_IDS(MACRO__, ...) \
	MACRO__(0x7D41, ## __VA_ARGS__), \
-	MACRO__(0x7D45, ## __VA_ARGS__), \
	MACRO__(0x7D51, ## __VA_ARGS__), \
-	MACRO__(0x7D55, ## __VA_ARGS__), \
-	MACRO__(0x7D60, ## __VA_ARGS__), \
	MACRO__(0x7D67, ## __VA_ARGS__), \
-	MACRO__(0x7DD1, ## __VA_ARGS__), \
-	MACRO__(0x7DD5, ## __VA_ARGS__)
+	MACRO__(0x7DD1, ## __VA_ARGS__)
+
+#define INTEL_MTL_IDS(MACRO__, ...) \
+	INTEL_ARL_IDS(MACRO__, ## __VA_ARGS__), \
+	MACRO__(0x7D40, ## __VA_ARGS__), \
+	MACRO__(0x7D45, ## __VA_ARGS__), \
+	MACRO__(0x7D55, ## __VA_ARGS__), \
+	MACRO__(0x7D60, ## __VA_ARGS__), \
+	MACRO__(0x7DD5, ## __VA_ARGS__)
 
 /* LNL */
@@ -39,11 +39,7 @@
 #include "ttm_device.h"
 
 /* Default number of pre-faulted pages in the TTM fault handler */
-#if CONFIG_PGTABLE_LEVELS > 2
-#define TTM_BO_VM_NUM_PREFAULT (1 << (PMD_SHIFT - PAGE_SHIFT))
-#else
 #define TTM_BO_VM_NUM_PREFAULT 16
-#endif
 
 struct iosys_map;
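For scale on the revert above: with 4 KiB pages and a 2 MiB PMD, the PMD-sized value prefaulted 512 pages per fault instead of 16, which matches the "longer stalls while prefaulting" called out in the pull message; quick arithmetic (shift values assumed for x86-64):

#include <stdio.h>

int main(void)
{
	const int PAGE_SHIFT = 12; /* 4 KiB pages (x86-64) */
	const int PMD_SHIFT = 21;  /* 2 MiB PMD (x86-64) */

	printf("PMD-size prefault: %d pages\n", 1 << (PMD_SHIFT - PAGE_SHIFT)); /* 512 */
	printf("reverted default:  %d pages\n", 16);
	return 0;
}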
|
@ -58,11 +58,11 @@ struct efifb_dmi_info {
|
||||
|
||||
#ifdef CONFIG_SYSFB
|
||||
|
||||
void sysfb_disable(void);
|
||||
void sysfb_disable(struct device *dev);
|
||||
|
||||
#else /* CONFIG_SYSFB */
|
||||
|
||||
static inline void sysfb_disable(void)
|
||||
static inline void sysfb_disable(struct device *dev)
|
||||
{
|
||||
}
|
||||
|
||||
|