Merge tag 'drm-fixes-2023-01-27' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Fairly small this week as well, i915 has a memory leak fix and some
  minor changes, and amdgpu has some MST fixes, and some other minor
  ones:

  drm:
   - DP MST kref fix
   - fb_helper: check return value

  i915:
   - Fix BSC default context for Meteor Lake
   - Fix selftest-scheduler's modify_type
   - memory leak fix

  amdgpu:
   - GC11.x fixes
   - SMU13.0.0 fix
   - Freesync video fix
   - DP MST fixes
   - build fix"

* tag 'drm-fixes-2023-01-27' of git://anongit.freedesktop.org/drm/drm:
  amdgpu: fix build on non-DCN platforms.
  drm/amd/display: Fix timing not changing when freesync video is enabled
  drm/display/dp_mst: Correct the kref of port.
  drm/amdgpu/display/mst: update mst_mgr relevant variable when long HPD
  drm/amdgpu/display/mst: limit payload to be updated one by one
  drm/amdgpu/display/mst: Fix mst_state->pbn_div and slot count assignments
  drm/amdgpu: declare firmware for new MES 11.0.4
  drm/amdgpu: enable imu firmware for GC 11.0.4
  drm/amd/pm: add missing AllowIHInterrupt message mapping for SMU13.0.0
  drm/amdgpu: remove unconditional trap enable on add gfx11 queues
  drm/fb-helper: Use a per-driver FB deferred I/O handler
  drm/fb-helper: Check fb_deferred_io_init() return value
  drm/i915/selftest: fix intel_selftest_modify_policy argument types
  drm/i915/mtl: Fix bcs default context
  drm/i915: Fix a memory leak with reused mmap_offset
  drm/drm_vma_manager: Add drm_vma_node_allow_once()
commit 76e26e3c6a
Author: Linus Torvalds
Date:   2023-01-27 13:18:14 -08:00

 15 files changed, 167 insertions(+), 89 deletions(-)


@@ -35,6 +35,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_4_imu.bin");
 
 static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
 {

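For context, MODULE_FIRMWARE() only records the blob name in the module
metadata so that initramfs tooling knows to bundle the file; the load itself
still happens at runtime. A minimal sketch of the usual declare/request
pairing (illustrative only: amdgpu goes through its own ucode helpers rather
than raw request_firmware()):

    #include <linux/firmware.h>
    #include <linux/module.h>

    MODULE_FIRMWARE("amdgpu/gc_11_0_4_imu.bin");

    /* Hypothetical loader, not part of this patch. */
    static int example_load_imu_fw(struct device *dev)
    {
            const struct firmware *fw;
            int r;

            /* Resolves /lib/firmware/amdgpu/gc_11_0_4_imu.bin at runtime. */
            r = request_firmware(&fw, "amdgpu/gc_11_0_4_imu.bin", dev);
            if (r)
                    return r;

            /* ... validate the header and hand the image to the IMU ... */

            release_firmware(fw);
            return 0;
    }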

@@ -40,6 +40,8 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes1.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes1.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes1.bin");
 
 static int mes_v11_0_hw_fini(void *handle);
 static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
@@ -196,7 +198,6 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
         mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
         mes_add_queue_pkt.tma_addr = input->tma_addr;
         mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
-        mes_add_queue_pkt.trap_en = 1;
 
         /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
         mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;


@@ -8881,6 +8881,13 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
         if (!dm_old_crtc_state->stream)
                 goto skip_modeset;
 
+        /* Unset freesync video if it was active before */
+        if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
+                dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
+                dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
+        }
+
+        /* Now check if we should set freesync video mode */
         if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
             is_timing_unchanged_for_freesync(new_crtc_state,
                                              old_crtc_state)) {
@@ -9497,6 +9504,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
         bool lock_and_validation_needed = false;
         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
+        struct drm_dp_mst_topology_mgr *mgr;
+        struct drm_dp_mst_topology_state *mst_state;
         struct dsc_mst_fairness_vars vars[MAX_PIPES];
 #endif
 
@@ -9745,6 +9754,28 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                 lock_and_validation_needed = true;
         }
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+        /* set the slot info for each mst_state based on the link encoding format */
+        for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+                struct amdgpu_dm_connector *aconnector;
+                struct drm_connector *connector;
+                struct drm_connector_list_iter iter;
+                u8 link_coding_cap;
+
+                drm_connector_list_iter_begin(dev, &iter);
+                drm_for_each_connector_iter(connector, &iter) {
+                        if (connector->index == mst_state->mgr->conn_base_id) {
+                                aconnector = to_amdgpu_dm_connector(connector);
+                                link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
+                                drm_dp_mst_update_slots(mst_state, link_coding_cap);
+
+                                break;
+                        }
+                }
+                drm_connector_list_iter_end(&iter);
+        }
+#endif
+
         /**
          * Streams and planes are reset when there are changes that affect
          * bandwidth. Anything that affects bandwidth needs to go through

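For reference, drm_dp_mst_update_slots(), which the loop added above now
calls from atomic check, is what switches the MST state between the two
slot layouts. Roughly, paraphrased from the DRM MST helper (not amdgpu
code, and minus its debug logging):

    void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state,
                                 uint8_t link_encoding_cap)
    {
            if (link_encoding_cap == DP_CAP_ANSI_128B132B) {
                    /* 128b/132b (UHBR) links: 64 slots, starting at slot 0 */
                    mst_state->total_avail_slots = 64;
                    mst_state->start_slot = 0;
            } else {
                    /* 8b/10b links: slot 0 carries the MTP header, 63 usable */
                    mst_state->total_avail_slots = 63;
                    mst_state->start_slot = 1;
            }
    }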

@@ -120,23 +120,50 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
 }
 
 static void
-fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state,
-                                   struct amdgpu_dm_connector *aconnector,
+fill_dc_mst_payload_table_from_drm(struct dc_link *link,
+                                   bool enable,
+                                   struct drm_dp_mst_atomic_payload *target_payload,
                                    struct dc_dp_mst_stream_allocation_table *table)
 {
         struct dc_dp_mst_stream_allocation_table new_table = { 0 };
         struct dc_dp_mst_stream_allocation *sa;
-        struct drm_dp_mst_atomic_payload *payload;
+        struct link_mst_stream_allocation_table copy_of_link_table =
+                link->mst_stream_alloc_table;
+
+        int i;
+        int current_hw_table_stream_cnt = copy_of_link_table.stream_count;
+        struct link_mst_stream_allocation *dc_alloc;
+
+        /* TODO: refactor to set link->mst_stream_alloc_table directly if possible.*/
+        if (enable) {
+                dc_alloc =
+                        &copy_of_link_table.stream_allocations[current_hw_table_stream_cnt];
+                dc_alloc->vcp_id = target_payload->vcpi;
+                dc_alloc->slot_count = target_payload->time_slots;
+        } else {
+                for (i = 0; i < copy_of_link_table.stream_count; i++) {
+                        dc_alloc =
+                                &copy_of_link_table.stream_allocations[i];
+
+                        if (dc_alloc->vcp_id == target_payload->vcpi) {
+                                dc_alloc->vcp_id = 0;
+                                dc_alloc->slot_count = 0;
+                                break;
+                        }
+                }
+                ASSERT(i != copy_of_link_table.stream_count);
+        }
 
         /* Fill payload info*/
-        list_for_each_entry(payload, &mst_state->payloads, next) {
-                if (payload->delete)
-                        continue;
-
-                sa = &new_table.stream_allocations[new_table.stream_count];
-                sa->slot_count = payload->time_slots;
-                sa->vcp_id = payload->vcpi;
+        for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+                dc_alloc =
+                        &copy_of_link_table.stream_allocations[i];
+
+                if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) {
+                        sa = &new_table.stream_allocations[new_table.stream_count];
+                        sa->slot_count = dc_alloc->slot_count;
+                        sa->vcp_id = dc_alloc->vcp_id;
                         new_table.stream_count++;
+                }
         }
 
         /* Overwrite the old table */
@@ -185,7 +212,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
          * AUX message. The sequence is slot 1-63 allocated sequence for each
          * stream. AMD ASIC stream slot allocation should follow the same
          * sequence. copy DRM MST allocation to dc */
-        fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table);
+        fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);
 
         return true;
 }


@@ -903,11 +903,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
         if (IS_ERR(mst_state))
                 return PTR_ERR(mst_state);
 
-        mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-        drm_dp_mst_update_slots(mst_state, dc_link_dp_mst_decide_link_encoding_format(dc_link));
-#endif
-
         /* Set up params */
         for (i = 0; i < dc_state->stream_count; i++) {
                 struct dc_dsc_policy dsc_policy = {0};


@@ -3995,10 +3995,13 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
         struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
         int i;
         bool mst_mode = (link->type == dc_connection_mst_branch);
+        /* adjust for drm changes*/
+        bool update_drm_mst_state = true;
         const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
         const struct dc_link_settings empty_link_settings = {0};
+
         DC_LOGGER_INIT(link->ctx->logger);
 
         /* deallocate_mst_payload is called before disable link. When mode or
          * disable/enable monitor, new stream is created which is not in link
          * stream[] yet. For this, payload is not allocated yet, so de-alloc
@@ -4014,7 +4017,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
                         &empty_link_settings,
                         avg_time_slots_per_mtp);
 
-        if (mst_mode) {
+        if (mst_mode || update_drm_mst_state) {
                 /* when link is in mst mode, reply on mst manager to remove
                  * payload
                  */
@@ -4077,11 +4080,18 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
                                 stream->ctx,
                                 stream);
 
-                dm_helpers_dp_mst_send_payload_allocation(
-                        stream->ctx,
-                        stream,
-                        false);
+                if (!update_drm_mst_state)
+                        dm_helpers_dp_mst_send_payload_allocation(
+                                stream->ctx,
+                                stream,
+                                false);
         }
 
+        if (update_drm_mst_state)
+                dm_helpers_dp_mst_send_payload_allocation(
+                        stream->ctx,
+                        stream,
+                        false);
+
         return DC_OK;
 }


@@ -145,6 +145,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
         MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
                 PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0),
         MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
+        MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
 };
 
 static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {

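The MSG_MAP() table is what lets the common swsmu code translate a generic
message id (here AllowIHHostInterrupt) into the ASIC-specific PPSMC opcode;
without an entry, the lookup for that message simply fails. A simplified
sketch of the lookup pattern (hypothetical helper, condensed from the common
swsmu code, not the literal implementation):

    static int example_msg_to_ppsmc(struct smu_context *smu,
                                    enum smu_message_type msg)
    {
            const struct cmn2asic_msg_mapping *map = smu->message_map;

            if (msg >= SMU_MSG_MAX_COUNT || !map[msg].valid_mapping)
                    return -EINVAL; /* AllowIHHostInterrupt, before this fix */

            return map[msg].map_to; /* PPSMC_MSG_AllowIHHostInterrupt */
    }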

@@ -3372,6 +3372,9 @@ void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
 
         mgr->payload_count--;
         mgr->next_start_slot -= payload->time_slots;
+
+        if (payload->delete)
+                drm_dp_mst_put_port_malloc(payload->port);
 }
 EXPORT_SYMBOL(drm_dp_remove_payload);
 
@@ -4327,7 +4330,6 @@ int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
         drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
 
         if (!payload->delete) {
-                drm_dp_mst_put_port_malloc(port);
                 payload->pbn = 0;
                 payload->delete = true;
                 topology_state->payload_mask &= ~BIT(payload->vcpi - 1);

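Taken together, the two hunks move where the payload's reference on the port
is dropped. A schematic of the lifetime rule after this fix (commentary
only, not literal driver code):

    /* Creating a payload takes a malloc reference, keeping payload->port
     * valid even if the port is unplugged in the meantime: */
    drm_dp_mst_get_port_malloc(port);

    /* Releasing the time slots now only marks the payload for deletion;
     * the reference is intentionally kept: */
    payload->delete = true;

    /* The reference is dropped only in drm_dp_remove_payload(), once the
     * payload has actually left the hardware payload table: */
    if (payload->delete)
            drm_dp_mst_put_port_malloc(payload->port);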

@@ -171,11 +171,6 @@ static const struct fb_ops drm_fbdev_fb_ops = {
         .fb_imageblit = drm_fbdev_fb_imageblit,
 };
 
-static struct fb_deferred_io drm_fbdev_defio = {
-        .delay = HZ / 20,
-        .deferred_io = drm_fb_helper_deferred_io,
-};
-
 /*
  * This function uses the client API to create a framebuffer backed by a dumb buffer.
  */
@@ -222,8 +217,14 @@ static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
                         return -ENOMEM;
 
                 fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
-                fbi->fbdefio = &drm_fbdev_defio;
-                fb_deferred_io_init(fbi);
+
+                /* Set a default deferred I/O handler */
+                fb_helper->fbdefio.delay = HZ / 20;
+                fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+                fbi->fbdefio = &fb_helper->fbdefio;
+                ret = fb_deferred_io_init(fbi);
+                if (ret)
+                        return ret;
         } else {
                 /* buffer is mapped for HW framebuffer */
                 ret = drm_client_buffer_vmap(fb_helper->buffer, &map);

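Background: fb_deferred_io_init() and the fbdev mmap path treat struct
fb_deferred_io as per-device state, so the old module-global drm_fbdev_defio
fell apart as soon as two devices used the generic fbdev emulation.
Embedding the struct in struct drm_fb_helper gives each device its own copy.
A sketch of the same per-device pattern for a driver that builds its fbdev
by hand (names are illustrative, not an existing driver):

    struct my_fbdev {
            struct fb_info *info;
            struct fb_deferred_io defio;    /* one per device, never shared */
    };

    static int my_fbdev_defio_setup(struct my_fbdev *fbdev)
    {
            fbdev->defio.delay = HZ / 20;   /* flush dirty pages at ~50 ms */
            fbdev->defio.deferred_io = drm_fb_helper_deferred_io;

            fbdev->info->fbdefio = &fbdev->defio;
            return fb_deferred_io_init(fbdev->info);  /* may fail, check it */
    }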

@@ -240,27 +240,8 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
 }
 EXPORT_SYMBOL(drm_vma_offset_remove);
 
-/**
- * drm_vma_node_allow - Add open-file to list of allowed users
- * @node: Node to modify
- * @tag: Tag of file to remove
- *
- * Add @tag to the list of allowed open-files for this node. If @tag is
- * already on this list, the ref-count is incremented.
- *
- * The list of allowed-users is preserved across drm_vma_offset_add() and
- * drm_vma_offset_remove() calls. You may even call it if the node is currently
- * not added to any offset-manager.
- *
- * You must remove all open-files the same number of times as you added them
- * before destroying the node. Otherwise, you will leak memory.
- *
- * This is locked against concurrent access internally.
- *
- * RETURNS:
- * 0 on success, negative error code on internal failure (out-of-mem)
- */
-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
+static int vma_node_allow(struct drm_vma_offset_node *node,
+                          struct drm_file *tag, bool ref_counted)
 {
         struct rb_node **iter;
         struct rb_node *parent = NULL;
@@ -282,7 +263,8 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
                 entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
 
                 if (tag == entry->vm_tag) {
-                        entry->vm_count++;
+                        if (ref_counted)
+                                entry->vm_count++;
                         goto unlock;
                 } else if (tag > entry->vm_tag) {
                         iter = &(*iter)->rb_right;
@@ -307,8 +289,58 @@ unlock:
         kfree(new);
         return ret;
 }
+
+/**
+ * drm_vma_node_allow - Add open-file to list of allowed users
+ * @node: Node to modify
+ * @tag: Tag of file to remove
+ *
+ * Add @tag to the list of allowed open-files for this node. If @tag is
+ * already on this list, the ref-count is incremented.
+ *
+ * The list of allowed-users is preserved across drm_vma_offset_add() and
+ * drm_vma_offset_remove() calls. You may even call it if the node is currently
+ * not added to any offset-manager.
+ *
+ * You must remove all open-files the same number of times as you added them
+ * before destroying the node. Otherwise, you will leak memory.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on internal failure (out-of-mem)
+ */
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
+{
+        return vma_node_allow(node, tag, true);
+}
 EXPORT_SYMBOL(drm_vma_node_allow);
 
+/**
+ * drm_vma_node_allow_once - Add open-file to list of allowed users
+ * @node: Node to modify
+ * @tag: Tag of file to remove
+ *
+ * Add @tag to the list of allowed open-files for this node.
+ *
+ * The list of allowed-users is preserved across drm_vma_offset_add() and
+ * drm_vma_offset_remove() calls. You may even call it if the node is currently
+ * not added to any offset-manager.
+ *
+ * This is not ref-counted unlike drm_vma_node_allow() hence drm_vma_node_revoke()
+ * should only be called once after this.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on internal failure (out-of-mem)
+ */
+int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag)
+{
+        return vma_node_allow(node, tag, false);
+}
+EXPORT_SYMBOL(drm_vma_node_allow_once);
+
 /**
  * drm_vma_node_revoke - Remove open-file from list of allowed users
  * @node: Node to modify

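The two exported entry points differ only in the ref_counted flag, and the
contract matters for callers: every drm_vma_node_allow() must be balanced by
its own drm_vma_node_revoke(), while drm_vma_node_allow_once() may be called
on every lookup and still be balanced by a single revoke. A condensed sketch
of the i915 leak this enables fixing (hypothetical shape; the real one-line
change is in the hunk below, from "drm/i915: Fix a memory leak with reused
mmap_offset"):

    /* Called on every mmap-offset lookup; mmo may be a reused, cached
     * object rather than a freshly created one. */
    static struct i915_mmap_offset *
    example_lookup_mmo(struct i915_mmap_offset *mmo, struct drm_file *file)
    {
            /*
             * Old: drm_vma_node_allow() bumped vm_count on every lookup,
             * but the close path revokes only once, leaking one
             * drm_vma_offset_file entry per extra lookup.
             */
            drm_vma_node_allow_once(&mmo->vma_node, file);
            return mmo;
    }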

@@ -697,7 +697,7 @@ insert:
         GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
 out:
         if (file)
-                drm_vma_node_allow(&mmo->vma_node, file);
+                drm_vma_node_allow_once(&mmo->vma_node, file);
         return mmo;
 
 err:


@@ -288,39 +288,6 @@ static const u8 dg2_xcs_offsets[] = {
         END
 };
 
-static const u8 mtl_xcs_offsets[] = {
-        NOP(1),
-        LRI(13, POSTED),
-        REG16(0x244),
-        REG(0x034),
-        REG(0x030),
-        REG(0x038),
-        REG(0x03c),
-        REG(0x168),
-        REG(0x140),
-        REG(0x110),
-        REG(0x1c0),
-        REG(0x1c4),
-        REG(0x1c8),
-        REG(0x180),
-        REG16(0x2b4),
-
-        NOP(4),
-        NOP(1),
-        LRI(9, POSTED),
-        REG16(0x3a8),
-        REG16(0x28c),
-        REG16(0x288),
-        REG16(0x284),
-        REG16(0x280),
-        REG16(0x27c),
-        REG16(0x278),
-        REG16(0x274),
-        REG16(0x270),
-
-        END
-};
-
 static const u8 gen8_rcs_offsets[] = {
         NOP(1),
         LRI(14, POSTED),
@@ -739,9 +706,7 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
                 else
                         return gen8_rcs_offsets;
         } else {
-                if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 70))
-                        return mtl_xcs_offsets;
-                else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
+                if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
                         return dg2_xcs_offsets;
                 else if (GRAPHICS_VER(engine->i915) >= 12)
                         return gen12_xcs_offsets;


@@ -28,8 +28,7 @@ struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt)
 
 int intel_selftest_modify_policy(struct intel_engine_cs *engine,
                                  struct intel_selftest_saved_policy *saved,
-                                 u32 modify_type)
+                                 enum selftest_scheduler_modify modify_type)
 {
         int err;
 


@@ -208,6 +208,18 @@ struct drm_fb_helper {
          * the smem_start field should always be cleared to zero.
          */
         bool hint_leak_smem_start;
+
+#ifdef CONFIG_FB_DEFERRED_IO
+        /**
+         * @fbdefio:
+         *
+         * Temporary storage for the driver's FB deferred I/O handler. If the
+         * driver uses the DRM fbdev emulation layer, this is set by the core
+         * to a generic deferred I/O handler if a driver is preferring to use
+         * a shadow buffer.
+         */
+        struct fb_deferred_io fbdefio;
+#endif
 };
 
 static inline struct drm_fb_helper *


@@ -74,6 +74,7 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
                            struct drm_vma_offset_node *node);
 int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
+int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag);
 void drm_vma_node_revoke(struct drm_vma_offset_node *node,
                          struct drm_file *tag);
 bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,