Mirror of https://github.com/torvalds/linux.git
Synced 2024-11-21 11:31:31 +00:00

Merge branch 'torvalds:master' into master

This commit is contained in: commit 80494eee60
@@ -123,20 +123,20 @@ static u32 crc32c_mips_le_hw(u32 crc_, const u8 *p, unsigned int len)
 		for (; len >= sizeof(u64); p += sizeof(u64), len -= sizeof(u64)) {
 			u64 value = get_unaligned_le64(p);
 
-			CRC32(crc, value, d);
+			CRC32C(crc, value, d);
 		}
 
 		if (len & sizeof(u32)) {
 			u32 value = get_unaligned_le32(p);
 
-			CRC32(crc, value, w);
+			CRC32C(crc, value, w);
 			p += sizeof(u32);
 		}
 	} else {
 		for (; len >= sizeof(u32); len -= sizeof(u32)) {
 			u32 value = get_unaligned_le32(p);
 
-			CRC32(crc, value, w);
+			CRC32C(crc, value, w);
 			p += sizeof(u32);
 		}
 	}
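The hunk above switches the crc32c path from the CRC32 to the CRC32C instruction forms. The two algorithms differ only in the generator polynomial; a bitwise sketch (illustrative only, not the kernel implementation, using the standard reflected polynomial constants):

    #include <stdint.h>

    /* Reflected generator polynomials: CRC-32 (IEEE) vs CRC-32C (Castagnoli). */
    #define CRC32_POLY_LE  0xedb88320u
    #define CRC32C_POLY_LE 0x82f63b78u

    static uint32_t crc_step(uint32_t crc, uint8_t byte, uint32_t poly)
    {
        crc ^= byte;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ ((crc & 1) ? poly : 0);
        return crc;
    }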
@@ -373,7 +373,7 @@ static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
 	return 0;
 }
 
-static inline void
+static inline int
 process_response_opp(struct device *dev, struct perf_dom_info *dom,
 		     struct scmi_opp *opp, unsigned int loop_idx,
 		     const struct scmi_msg_resp_perf_describe_levels *r)
@@ -386,12 +386,16 @@ process_response_opp(struct device *dev, struct perf_dom_info *dom,
 		le16_to_cpu(r->opp[loop_idx].transition_latency_us);
 
 	ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
-	if (ret)
-		dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+	if (ret) {
+		dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
 			 opp->perf, dom->info.name, ret);
+		return ret;
+	}
+
+	return 0;
 }
 
-static inline void
+static inline int
 process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
 			struct scmi_opp *opp, unsigned int loop_idx,
 			const struct scmi_msg_resp_perf_describe_levels_v4 *r)
@@ -404,9 +408,11 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
 		le16_to_cpu(r->opp[loop_idx].transition_latency_us);
 
 	ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
-	if (ret)
-		dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+	if (ret) {
+		dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
 			 opp->perf, dom->info.name, ret);
+		return ret;
+	}
 
 	/* Note that PERF v4 reports always five 32-bit words */
 	opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
@@ -415,13 +421,21 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
 
 		ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
 				GFP_KERNEL);
-		if (ret)
+		if (ret) {
 			dev_warn(dev,
 				 "Failed to add opps_by_idx at %d for %s - ret:%d\n",
 				 opp->level_index, dom->info.name, ret);
 
+			/* Cleanup by_lvl too */
+			xa_erase(&dom->opps_by_lvl, opp->perf);
+
+			return ret;
+		}
+
 		hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
 	}
 
+	return 0;
 }
 
 static int
@@ -429,16 +443,22 @@ iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
 				  const void *response,
 				  struct scmi_iterator_state *st, void *priv)
 {
+	int ret;
 	struct scmi_opp *opp;
 	struct scmi_perf_ipriv *p = priv;
 
-	opp = &p->perf_dom->opp[st->desc_index + st->loop_idx];
+	opp = &p->perf_dom->opp[p->perf_dom->opp_count];
 	if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
-		process_response_opp(ph->dev, p->perf_dom, opp, st->loop_idx,
-				     response);
+		ret = process_response_opp(ph->dev, p->perf_dom, opp,
+					   st->loop_idx, response);
 	else
-		process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx,
-					response);
+		ret = process_response_opp_v4(ph->dev, p->perf_dom, opp,
+					      st->loop_idx, response);
+
+	/* Skip BAD duplicates received from firmware */
+	if (ret)
+		return ret == -EBUSY ? 0 : ret;
+
+	p->perf_dom->opp_count++;
 
 	dev_dbg(ph->dev, "Level %d Power %d Latency %dus Ifreq %d Index %d\n",
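The iterator change above keys every OPP into XArrays and relies on xa_insert() reporting -EBUSY for an already-occupied index to detect duplicate levels sent by firmware. A minimal sketch of that pattern (the track_opp() helper and opp_map are hypothetical, not driver code):

    #include <linux/xarray.h>

    static DEFINE_XARRAY(opp_map);

    static int track_opp(unsigned long level, void *opp)
    {
        int err = xa_insert(&opp_map, level, opp, GFP_KERNEL);

        /* xa_insert() returns -EBUSY when the index is already in use;
         * treat a duplicate as a soft condition rather than an error. */
        return err == -EBUSY ? 0 : err;
    }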
@@ -161,7 +161,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 		 * When GTT is just an alternative to VRAM make sure that we
 		 * only use it as fallback and still try to fill up VRAM first.
 		 */
-		if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
+		if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
+		    !(adev->flags & AMD_IS_APU))
 			places[c].flags |= TTM_PL_FLAG_FALLBACK;
 		c++;
 	}
@@ -1124,8 +1124,10 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
 					  uint64_t *flags)
 {
 	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
-	bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT | AMDGPU_GEM_CREATE_EXT_COHERENT);
+	bool is_vram = bo->tbo.resource &&
+		bo->tbo.resource->mem_type == TTM_PL_VRAM;
+	bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
+				     AMDGPU_GEM_CREATE_EXT_COHERENT);
 	bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
 	bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
 	struct amdgpu_vm *vm = mapping->bo_va->base.vm;
@@ -1133,6 +1135,8 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
 	bool snoop = false;
 	bool is_local;
 
+	dma_resv_assert_held(bo->tbo.base.resv);
+
 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 	case IP_VERSION(9, 4, 1):
 	case IP_VERSION(9, 4, 2):
@@ -1251,9 +1255,8 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
 		*flags &= ~AMDGPU_PTE_VALID;
 	}
 
-	if (bo && bo->tbo.resource)
-		gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo,
-					     mapping, flags);
+	if ((*flags & AMDGPU_PTE_VALID) && bo)
+		gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags);
 }
 
 static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
@@ -550,7 +550,7 @@ static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes, int pipe)
 	mes_set_hw_res_1_pkt.header.type = MES_API_TYPE_SCHEDULER;
 	mes_set_hw_res_1_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1;
 	mes_set_hw_res_1_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
-	mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 100;
+	mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 0xa;
 
 	return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
 			&mes_set_hw_res_1_pkt, sizeof(mes_set_hw_res_1_pkt),
@@ -247,6 +247,12 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
 	if (def != data)
 		WREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3, data);
 
+	switch (adev->ip_versions[NBIO_HWIP][0]) {
+	case IP_VERSION(7, 7, 0):
+		data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4) & ~BIT(23);
+		WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4, data);
+		break;
+	}
 }
 
 static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
@@ -67,8 +67,8 @@ static const struct amd_ip_funcs nv_common_ip_funcs;
 
 /* Navi */
 static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 0)},
 };
 
 static const struct amdgpu_video_codecs nv_video_codecs_encode = {
@@ -94,8 +94,8 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode = {
 
 /* Sienna Cichlid */
 static const struct amdgpu_video_codec_info sc_video_codecs_encode_array[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
 };
 
 static const struct amdgpu_video_codecs sc_video_codecs_encode = {
@@ -136,8 +136,8 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 = {
 
 /* SRIOV Sienna Cichlid, not const since data is controlled by host */
 static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
 };
 
 static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = {
@@ -90,8 +90,8 @@ static const struct amd_ip_funcs soc15_common_ip_funcs;
 /* Vega, Raven, Arcturus */
 static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
 {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 0)},
 };
 
 static const struct amdgpu_video_codecs vega_video_codecs_encode =
@@ -49,13 +49,13 @@ static const struct amd_ip_funcs soc21_common_ip_funcs;
 
 /* SOC21 */
 static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 };
 
 static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
 };
 
@@ -96,14 +96,14 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 = {
 
 /* SRIOV SOC21, not const since data is controlled by host */
 static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 };
 
 static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
 };
 
 static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn0 = {
@@ -48,7 +48,7 @@
 static const struct amd_ip_funcs soc24_common_ip_funcs;
 
 static const struct amdgpu_video_codec_info vcn_5_0_0_video_codecs_encode_array_vcn0[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 };
@@ -136,15 +136,15 @@ static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[]
 	{
 		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
 		.max_width = 4096,
-		.max_height = 2304,
-		.max_pixels_per_frame = 4096 * 2304,
+		.max_height = 4096,
+		.max_pixels_per_frame = 4096 * 4096,
 		.max_level = 0,
 	},
 	{
 		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
 		.max_width = 4096,
-		.max_height = 2304,
-		.max_pixels_per_frame = 4096 * 2304,
+		.max_height = 4096,
+		.max_pixels_per_frame = 4096 * 4096,
 		.max_level = 0,
 	},
 };
@@ -6762,7 +6762,7 @@ create_stream_for_sink(struct drm_connector *connector,
 		if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
 			tf = TRANSFER_FUNC_GAMMA_22;
 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
-		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+		aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
 
 	}
 finish:
@@ -8875,6 +8875,56 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane,
 	}
 }
 
+static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
+					  const struct dm_crtc_state *acrtc_state,
+					  const u64 current_ts)
+{
+	struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
+	struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
+	struct amdgpu_dm_connector *aconn =
+		(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+
+	if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+		if (pr->config.replay_supported && !pr->replay_feature_enabled)
+			amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
+		else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+			 !psr->psr_feature_enabled)
+			if (!aconn->disallow_edp_enter_psr)
+				amdgpu_dm_link_setup_psr(acrtc_state->stream);
+	}
+
+	/* Decrement skip count when SR is enabled and we're doing fast updates. */
+	if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
+	    (psr->psr_feature_enabled || pr->config.replay_supported)) {
+		if (aconn->sr_skip_count > 0)
+			aconn->sr_skip_count--;
+
+		/* Allow SR when skip count is 0. */
+		acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count;
+
+		/*
+		 * If sink supports PSR SU/Panel Replay, there is no need to rely on
+		 * a vblank event disable request to enable PSR/RP. PSR SU/RP
+		 * can be enabled immediately once OS demonstrates an
+		 * adequate number of fast atomic commits to notify KMD
+		 * of update events. See `vblank_control_worker()`.
+		 */
+		if (acrtc_attach->dm_irq_params.allow_sr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+		    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
+		    (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
+			if (pr->replay_feature_enabled && !pr->replay_allow_active)
+				amdgpu_dm_replay_enable(acrtc_state->stream, true);
+			if (psr->psr_version >= DC_PSR_VERSION_SU_1 &&
+			    !psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
+				amdgpu_dm_psr_enable(acrtc_state->stream);
+		}
+	} else {
+		acrtc_attach->dm_irq_params.allow_sr_entry = false;
+	}
+}
+
 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 				    struct drm_device *dev,
 				    struct amdgpu_display_manager *dm,
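The new amdgpu_dm_enable_self_refresh() above consolidates the PSR and Panel Replay entry policy that was previously inlined in amdgpu_dm_commit_planes(): a per-connector skip count must drain to zero over consecutive fast updates before self-refresh entry is allowed. Setting field names aside, the gating reduces to this sketch (hypothetical variable names):

    /* Allow self-refresh only after N consecutive fast (flip-only) updates. */
    if (update_is_fast && sr_supported) {
        if (skip_count > 0)
            skip_count--;
        allow_sr_entry = (skip_count == 0);
    } else if (!update_is_fast) {
        allow_sr_entry = false;	/* a heavy update resets eligibility */
    }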
@@ -9028,7 +9078,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			 * during the PSR-SU was disabled.
 			 */
 			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
-			    acrtc_attach->dm_irq_params.allow_psr_entry &&
+			    acrtc_attach->dm_irq_params.allow_sr_entry &&
 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
 			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
 #endif
@@ -9203,9 +9253,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		bundle->stream_update.abm_level = &acrtc_state->abm_level;
 
 		mutex_lock(&dm->dc_lock);
-		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
-				acrtc_state->stream->link->psr_settings.psr_allow_active)
-			amdgpu_dm_psr_disable(acrtc_state->stream);
+		if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+			if (acrtc_state->stream->link->replay_settings.replay_allow_active)
+				amdgpu_dm_replay_disable(acrtc_state->stream);
+			if (acrtc_state->stream->link->psr_settings.psr_allow_active)
+				amdgpu_dm_psr_disable(acrtc_state->stream);
+		}
 		mutex_unlock(&dm->dc_lock);
 
 		/*
@@ -9246,57 +9299,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			dm_update_pflip_irq_state(drm_to_adev(dev),
 						  acrtc_attach);
 
-		if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
-			if (acrtc_state->stream->link->replay_settings.config.replay_supported &&
-					!acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
-				struct amdgpu_dm_connector *aconn =
-					(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
-				amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
-			} else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
-					!acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
-
-				struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
-					acrtc_state->stream->dm_stream_context;
-
-				if (!aconn->disallow_edp_enter_psr)
-					amdgpu_dm_link_setup_psr(acrtc_state->stream);
-			}
-		}
-
-		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
-		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
-		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
-			struct amdgpu_dm_connector *aconn =
-				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
-
-			if (aconn->psr_skip_count > 0)
-				aconn->psr_skip_count--;
-
-			/* Allow PSR when skip count is 0. */
-			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
-
-			/*
-			 * If sink supports PSR SU, there is no need to rely on
-			 * a vblank event disable request to enable PSR. PSR SU
-			 * can be enabled immediately once OS demonstrates an
-			 * adequate number of fast atomic commits to notify KMD
-			 * of update events. See `vblank_control_worker()`.
-			 */
-			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
-			    acrtc_attach->dm_irq_params.allow_psr_entry &&
-#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
-			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
-#endif
-			    !acrtc_state->stream->link->psr_settings.psr_allow_active &&
-			    !aconn->disallow_edp_enter_psr &&
-			    (timestamp_ns -
-			    acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
-			    500000000)
-				amdgpu_dm_psr_enable(acrtc_state->stream);
-		} else {
-			acrtc_attach->dm_irq_params.allow_psr_entry = false;
-		}
-
+		amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns);
 		mutex_unlock(&dm->dc_lock);
 	}
 
@@ -12080,7 +12083,7 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
 			break;
 	}
 
-	while (j < EDID_LENGTH) {
+	while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) {
 		struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
 		unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);
 
@@ -727,7 +727,7 @@ struct amdgpu_dm_connector {
 	/* Cached display modes */
 	struct drm_display_mode freesync_vid_base;
 
-	int psr_skip_count;
+	int sr_skip_count;
 	bool disallow_edp_enter_psr;
 
 	/* Record progress status of mst*/
@@ -266,11 +266,10 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
 	 * where the SU region is the full hactive*vactive region. See
 	 * fill_dc_dirty_rects().
 	 */
-	if (vblank_work->stream && vblank_work->stream->link) {
+	if (vblank_work->stream && vblank_work->stream->link && vblank_work->acrtc) {
 		amdgpu_dm_crtc_set_panel_sr_feature(
 			vblank_work, vblank_work->enable,
-			vblank_work->acrtc->dm_irq_params.allow_psr_entry ||
-			vblank_work->stream->link->replay_settings.replay_feature_enabled);
+			vblank_work->acrtc->dm_irq_params.allow_sr_entry);
 	}
 
 	if (dm->active_vblank_irq_count == 0) {
@@ -33,7 +33,7 @@ struct dm_irq_params {
 	struct mod_vrr_params vrr_params;
 	struct dc_stream_state *stream;
 	int active_planes;
-	bool allow_psr_entry;
+	bool allow_sr_entry;
 	struct mod_freesync_config freesync_config;
 
 #ifdef CONFIG_DEBUG_FS
@@ -3122,14 +3122,12 @@ static enum bp_result bios_parser_get_vram_info(
 	struct dc_vram_info *info)
 {
 	struct bios_parser *bp = BP_FROM_DCB(dcb);
-	static enum bp_result result = BP_RESULT_BADBIOSTABLE;
+	enum bp_result result = BP_RESULT_BADBIOSTABLE;
 	struct atom_common_table_header *header;
 	struct atom_data_revision revision;
 
 	// vram info moved to umc_info for DCN4x
-	if (dcb->ctx->dce_version >= DCN_VERSION_4_01 &&
-	    dcb->ctx->dce_version < DCN_VERSION_MAX &&
-	    info && DATA_TABLES(umc_info)) {
+	if (info && DATA_TABLES(umc_info)) {
 		header = GET_IMAGE(struct atom_common_table_header,
 				   DATA_TABLES(umc_info));
 
@@ -265,6 +265,9 @@ struct dc_state *dc_state_create_copy(struct dc_state *src_state)
 	dc_state_copy_internal(new_state, src_state);
 
 #ifdef CONFIG_DRM_AMD_DC_FP
+	new_state->bw_ctx.dml2 = NULL;
+	new_state->bw_ctx.dml2_dc_power_source = NULL;
+
 	if (src_state->bw_ctx.dml2 &&
 	    !dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) {
 		dc_state_release(new_state);
@@ -8,6 +8,7 @@
 #include "dml2_pmo_dcn4_fams2.h"
 
 static const double MIN_VACTIVE_MARGIN_PCT = 0.25; // We need more than non-zero margin because DET buffer granularity can alter vactive latency hiding
+static const double MIN_BLANK_STUTTER_FACTOR = 3.0;
 
 static const struct dml2_pmo_pstate_strategy base_strategy_list_1_display[] = {
 	// VActive Preferred
@@ -2139,6 +2140,7 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in
 	struct dml2_pmo_instance *pmo = in_out->instance;
 	bool stutter_period_meets_z8_eco = true;
 	bool z8_stutter_optimization_too_expensive = false;
+	bool stutter_optimization_too_expensive = false;
 	double line_time_us, vblank_nom_time_us;
 
 	unsigned int i;
@@ -2160,10 +2162,15 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in
 		line_time_us = (double)in_out->base_display_config->display_config.stream_descriptors[i].timing.h_total / (in_out->base_display_config->display_config.stream_descriptors[i].timing.pixel_clock_khz * 1000) * 1000000;
 		vblank_nom_time_us = line_time_us * in_out->base_display_config->display_config.stream_descriptors[i].timing.vblank_nom;
 
-		if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us) {
+		if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
 			z8_stutter_optimization_too_expensive = true;
 			break;
 		}
+
+		if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
+			stutter_optimization_too_expensive = true;
+			break;
+		}
 	}
 
 	pmo->scratch.pmo_dcn4.num_stutter_candidates = 0;
@@ -2179,7 +2186,7 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in
 		pmo->scratch.pmo_dcn4.z8_vblank_optimizable = false;
 	}
 
-	if (pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) {
+	if (!stutter_optimization_too_expensive && pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) {
 		pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.num_stutter_candidates] = (unsigned int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us;
 		pmo->scratch.pmo_dcn4.num_stutter_candidates++;
 	}
@@ -1132,7 +1132,7 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu,
 static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
 					enum smu_clk_type clk_type, char *buf)
 {
-	int i, size = 0, ret = 0;
+	int i, idx, ret = 0, size = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	uint32_t min, max;
 
@@ -1168,7 +1168,8 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
 			break;
 
 		for (i = 0; i < count; i++) {
-			ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, idx, &value);
 			if (ret)
 				break;
 
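The idx remap above reverses the print order for SMU_MCLK levels only, so memory-clock levels come out in the opposite order from how firmware indexes them, while every other clock type keeps its native order. Worked through for a hypothetical count of 4: i = 0, 1, 2, 3 maps to idx = 3, 2, 1, 0.

    /* Sketch of the reversal: iterate forward, fetch backward for one type. */
    for (i = 0; i < count; i++) {
        idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i;
        /* ... fetch and print the frequency at level 'idx' ... */
    }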
@@ -125,6 +125,9 @@
 #define TC358768_DSI_CONFW_MODE_CLR	(6 << 29)
 #define TC358768_DSI_CONFW_ADDR_DSI_CONTROL	(0x3 << 24)
 
+/* TC358768_DSICMD_TX (0x0600) register */
+#define TC358768_DSI_CMDTX_DC_START	BIT(0)
+
 static const char * const tc358768_supplies[] = {
 	"vddc", "vddmipi", "vddio"
 };
@@ -229,6 +232,21 @@ static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
 		tc358768_write(priv, reg, tmp);
 }
 
+static void tc358768_dsicmd_tx(struct tc358768_priv *priv)
+{
+	u32 val;
+
+	/* start transfer */
+	tc358768_write(priv, TC358768_DSICMD_TX, TC358768_DSI_CMDTX_DC_START);
+	if (priv->error)
+		return;
+
+	/* wait transfer completion */
+	priv->error = regmap_read_poll_timeout(priv->regmap, TC358768_DSICMD_TX, val,
+					       (val & TC358768_DSI_CMDTX_DC_START) == 0,
+					       100, 100000);
+}
+
 static int tc358768_sw_reset(struct tc358768_priv *priv)
 {
 	/* Assert Reset */
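The new helper above replaces a fire-and-forget register write with a start-then-poll sequence. regmap_read_poll_timeout(map, reg, val, cond, sleep_us, timeout_us) re-reads reg until cond evaluates true, returning 0 on success or -ETIMEDOUT; here it waits for the DC_START bit to self-clear, which is how the bridge signals command completion. A hedged usage sketch (error reporting path is an assumption, not from the driver):

    u32 val;
    int err;

    /* Poll every 100 us, give up after 100 ms, until BIT(0) clears. */
    err = regmap_read_poll_timeout(regmap, TC358768_DSICMD_TX, val,
                                   (val & TC358768_DSI_CMDTX_DC_START) == 0,
                                   100, 100000);
    if (err)
        pr_err("DSI command did not complete: %d\n", err);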
@@ -516,8 +534,7 @@ static ssize_t tc358768_dsi_host_transfer(struct mipi_dsi_host *host,
 		}
 	}
 
-	/* start transfer */
-	tc358768_write(priv, TC358768_DSICMD_TX, 1);
+	tc358768_dsicmd_tx(priv);
 
 	ret = tc358768_clear_error(priv);
 	if (ret)
@@ -928,7 +928,7 @@ intel_enable_tv(struct intel_atomic_state *state,
 		const struct intel_crtc_state *pipe_config,
 		const struct drm_connector_state *conn_state)
 {
-	struct intel_display *display = to_intel_display(state);
+	struct intel_display *display = to_intel_display(encoder);
 
 	/* Prevents vblank waits from timing out in intel_tv_detect_type() */
 	intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc));
@@ -942,7 +942,7 @@ intel_disable_tv(struct intel_atomic_state *state,
 		 const struct intel_crtc_state *old_crtc_state,
 		 const struct drm_connector_state *old_conn_state)
 {
-	struct intel_display *display = to_intel_display(state);
+	struct intel_display *display = to_intel_display(encoder);
 
 	intel_de_rmw(display, TV_CTL, TV_ENC_ENABLE, 0);
 }
@@ -80,6 +80,7 @@ int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, s
 	const struct intel_gsc_cpd_header_v2 *cpd_header = NULL;
 	const struct intel_gsc_cpd_entry *cpd_entry = NULL;
 	const struct intel_gsc_manifest_header *manifest;
+	struct intel_uc_fw_ver min_ver = { 0 };
 	size_t min_size = sizeof(*layout);
 	int i;
 
@@ -212,33 +213,46 @@ int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, s
 		}
 	}
 
-	if (IS_ARROWLAKE(gt->i915)) {
-		bool too_old = false;
-
-		/*
-		 * ARL requires a newer firmware than MTL did (102.0.10.1878) but the
-		 * firmware is actually common. So, need to do an explicit version check
-		 * here rather than using a separate table entry. And if the older
-		 * MTL-only version is found, then just don't use GSC rather than aborting
-		 * the driver load.
-		 */
-		if (gsc->release.major < 102) {
-			too_old = true;
-		} else if (gsc->release.major == 102) {
-			if (gsc->release.minor == 0) {
-				if (gsc->release.patch < 10) {
-					too_old = true;
-				} else if (gsc->release.patch == 10) {
-					if (gsc->release.build < 1878)
-						too_old = true;
-				}
-			}
-		}
-
-		if (too_old) {
-			gt_info(gt, "GSC firmware too old for ARL, got %d.%d.%d.%d but need at least 102.0.10.1878",
-				gsc->release.major, gsc->release.minor,
-				gsc->release.patch, gsc->release.build);
-			return -EINVAL;
-		}
-	}
+	/*
+	 * ARL SKUs require newer firmwares, but the blob is actually common
+	 * across all MTL and ARL SKUs, so we need to do an explicit version check
+	 * here rather than using a separate table entry. If a too old version
+	 * is found, then just don't use GSC rather than aborting the driver load.
+	 * Note that the major number in the GSC FW version is used to indicate
+	 * the platform, so we expect it to always be 102 for MTL/ARL binaries.
+	 */
+	if (IS_ARROWLAKE_S(gt->i915))
+		min_ver = (struct intel_uc_fw_ver){ 102, 0, 10, 1878 };
+	else if (IS_ARROWLAKE_H(gt->i915) || IS_ARROWLAKE_U(gt->i915))
+		min_ver = (struct intel_uc_fw_ver){ 102, 1, 15, 1926 };
+
+	if (IS_METEORLAKE(gt->i915) && gsc->release.major != 102) {
+		gt_info(gt, "Invalid GSC firmware for MTL/ARL, got %d.%d.%d.%d but need 102.x.x.x",
+			gsc->release.major, gsc->release.minor,
+			gsc->release.patch, gsc->release.build);
+		return -EINVAL;
+	}
+
+	if (min_ver.major) {
+		bool too_old = false;
+
+		if (gsc->release.minor < min_ver.minor) {
+			too_old = true;
+		} else if (gsc->release.minor == min_ver.minor) {
+			if (gsc->release.patch < min_ver.patch) {
+				too_old = true;
+			} else if (gsc->release.patch == min_ver.patch) {
+				if (gsc->release.build < min_ver.build)
+					too_old = true;
+			}
+		}
+
+		if (too_old) {
+			gt_info(gt, "GSC firmware too old for ARL, got %d.%d.%d.%d but need at least %d.%d.%d.%d",
+				gsc->release.major, gsc->release.minor,
+				gsc->release.patch, gsc->release.build,
+				min_ver.major, min_ver.minor,
+				min_ver.patch, min_ver.build);
+			return -EINVAL;
+		}
+	}
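The refactor above turns a hard-coded 102.0.10.1878 check into a comparison against a per-SKU min_ver, with the majors already validated separately. The tie-breaking cascade (compare minor, then patch, then build once the earlier fields are equal) is a standard multi-field version compare; a compact sketch of the same logic (hypothetical struct, not the i915 type):

    struct fw_ver { int major, minor, patch, build; };

    /* Returns true when 'got' is older than 'min' (majors assumed equal). */
    static bool fw_too_old(const struct fw_ver *got, const struct fw_ver *min)
    {
        if (got->minor != min->minor)
            return got->minor < min->minor;
        if (got->patch != min->patch)
            return got->patch < min->patch;
        return got->build < min->build;
    }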
@@ -540,8 +540,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define IS_LUNARLAKE(i915) (0 && i915)
 #define IS_BATTLEMAGE(i915)  (0 && i915)
 
-#define IS_ARROWLAKE(i915) \
-	IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL)
+#define IS_ARROWLAKE_H(i915) \
+	IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_H)
+#define IS_ARROWLAKE_U(i915) \
+	IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_U)
+#define IS_ARROWLAKE_S(i915) \
+	IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_S)
 #define IS_DG2_G10(i915) \
 	IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G10)
 #define IS_DG2_G11(i915) \
@@ -200,8 +200,16 @@ static const u16 subplatform_g12_ids[] = {
 	INTEL_DG2_G12_IDS(ID),
 };
 
-static const u16 subplatform_arl_ids[] = {
-	INTEL_ARL_IDS(ID),
+static const u16 subplatform_arl_h_ids[] = {
+	INTEL_ARL_H_IDS(ID),
+};
+
+static const u16 subplatform_arl_u_ids[] = {
+	INTEL_ARL_U_IDS(ID),
+};
+
+static const u16 subplatform_arl_s_ids[] = {
+	INTEL_ARL_S_IDS(ID),
 };
 
 static bool find_devid(u16 id, const u16 *p, unsigned int num)
@@ -261,9 +269,15 @@ static void intel_device_info_subplatform_init(struct drm_i915_private *i915)
 	} else if (find_devid(devid, subplatform_g12_ids,
 			      ARRAY_SIZE(subplatform_g12_ids))) {
 		mask = BIT(INTEL_SUBPLATFORM_G12);
-	} else if (find_devid(devid, subplatform_arl_ids,
-			      ARRAY_SIZE(subplatform_arl_ids))) {
-		mask = BIT(INTEL_SUBPLATFORM_ARL);
+	} else if (find_devid(devid, subplatform_arl_h_ids,
+			      ARRAY_SIZE(subplatform_arl_h_ids))) {
+		mask = BIT(INTEL_SUBPLATFORM_ARL_H);
+	} else if (find_devid(devid, subplatform_arl_u_ids,
+			      ARRAY_SIZE(subplatform_arl_u_ids))) {
+		mask = BIT(INTEL_SUBPLATFORM_ARL_U);
+	} else if (find_devid(devid, subplatform_arl_s_ids,
+			      ARRAY_SIZE(subplatform_arl_s_ids))) {
+		mask = BIT(INTEL_SUBPLATFORM_ARL_S);
 	}
 
 	GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_MASK);
@@ -128,7 +128,9 @@ enum intel_platform {
 #define INTEL_SUBPLATFORM_RPLU	2
 
 /* MTL */
-#define INTEL_SUBPLATFORM_ARL	0
+#define INTEL_SUBPLATFORM_ARL_H	0
+#define INTEL_SUBPLATFORM_ARL_U	1
+#define INTEL_SUBPLATFORM_ARL_S	2
 
 enum intel_ppgtt_type {
 	INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
@@ -992,7 +992,7 @@ r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8
 	ctrl->data = data;
 
 	ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
-	if (ret == -EAGAIN && ctrl->retryTimeMs) {
+	if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) {
 		/*
 		 * Device (likely an eDP panel) isn't ready yet, wait for the time specified
 		 * by GSP before retrying again
@@ -1060,33 +1060,44 @@ r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
 	NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *ctrl;
 	u8 size = *psize;
 	int ret;
+	int retries;
 
-	ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl));
-	if (IS_ERR(ctrl))
-		return PTR_ERR(ctrl);
+	for (retries = 0; retries < 3; ++retries) {
+		ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl));
+		if (IS_ERR(ctrl))
+			return PTR_ERR(ctrl);
 
-	ctrl->subDeviceInstance = 0;
-	ctrl->displayId = BIT(outp->index);
-	ctrl->bAddrOnly = !size;
-	ctrl->cmd = type;
-	if (ctrl->bAddrOnly) {
-		ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE);
-		ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, I2C_MOT, FALSE);
+		ctrl->subDeviceInstance = 0;
+		ctrl->displayId = BIT(outp->index);
+		ctrl->bAddrOnly = !size;
+		ctrl->cmd = type;
+		if (ctrl->bAddrOnly) {
+			ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE);
+			ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, I2C_MOT, FALSE);
+		}
+		ctrl->addr = addr;
+		ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
+		memcpy(ctrl->data, data, size);
+
+		ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+		if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) {
+			/*
+			 * Device (likely an eDP panel) isn't ready yet, wait for the time specified
+			 * by GSP before retrying again
+			 */
+			nvkm_debug(&disp->engine.subdev,
+				   "Waiting %dms for GSP LT panel delay before retrying in AUX\n",
+				   ctrl->retryTimeMs);
+			msleep(ctrl->retryTimeMs);
+			nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+		} else {
+			memcpy(data, ctrl->data, size);
+			*psize = ctrl->size;
+			ret = ctrl->replyType;
+			nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+			break;
+		}
 	}
-	ctrl->addr = addr;
-	ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
-	memcpy(ctrl->data, data, size);
 
-	ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
-	if (ret) {
-		nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-		return ret;
-	}
-
-	memcpy(data, ctrl->data, size);
-	*psize = ctrl->size;
-	ret = ctrl->replyType;
-	nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
 	return ret;
 }
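The AUX path above now rebuilds and resubmits the control message up to three times when GSP reports the panel is not ready, sleeping for the firmware-specified retryTimeMs between attempts instead of failing outright. The shape is a standard bounded retry with device-directed backoff; a generic sketch (submit() and the retry_ms field are placeholders, not nouveau APIs):

    int ret, retries;

    for (retries = 0; retries < 3; ++retries) {
        ret = submit(msg);
        if (ret != -EBUSY && ret != -EAGAIN)
            break;              /* success, or a hard error */
        msleep(msg->retry_ms);  /* firmware-specified backoff */
    }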
@@ -89,11 +89,6 @@ nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user,
 			nvkm_falcon_fw_dtor_sigs(fw);
 	}
 
-	/* after last write to the img, sync dma mappings */
-	dma_sync_single_for_device(fw->fw.device->dev,
-				   fw->fw.phys,
-				   sg_dma_len(&fw->fw.mem.sgl),
-				   DMA_TO_DEVICE);
-
 	FLCNFW_DBG(fw, "resetting");
 	fw->func->reset(fw);
@@ -105,6 +100,12 @@ nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user,
 			goto done;
 	}
 
+	/* after last write to the img, sync dma mappings */
+	dma_sync_single_for_device(fw->fw.device->dev,
+				   fw->fw.phys,
+				   sg_dma_len(&fw->fw.mem.sgl),
+				   DMA_TO_DEVICE);
+
 	ret = fw->func->load(fw);
 	if (ret)
 		goto done;
@@ -78,7 +78,7 @@ r535_rpc_status_to_errno(uint32_t rpc_status)
 	switch (rpc_status) {
 	case 0x55: /* NV_ERR_NOT_READY */
 	case 0x66: /* NV_ERR_TIMEOUT_RETRY */
-		return -EAGAIN;
+		return -EBUSY;
 	case 0x51: /* NV_ERR_NO_MEMORY */
 		return -ENOMEM;
 	default:
@@ -601,7 +601,7 @@ r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
 
 	if (rpc->status) {
 		ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
-		if (PTR_ERR(ret) != -EAGAIN)
+		if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY)
 			nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
 	} else {
 		ret = repc ? rpc->params : NULL;
@@ -660,7 +660,7 @@ r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc)
 
 	if (rpc->status) {
 		ret = r535_rpc_status_to_errno(rpc->status);
-		if (ret != -EAGAIN)
+		if (ret != -EAGAIN && ret != -EBUSY)
 			nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
 				   object->client->object.handle, object->handle, rpc->cmd, rpc->status);
 	}
@@ -990,6 +990,8 @@ panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
 
 		if (!size)
 			break;
+
+		offset = 0;
 	}
 
 	return panthor_vm_flush_range(vm, start_iova, iova - start_iova);
@@ -1093,10 +1093,10 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane,
 	if (!plane->state->fb)
 		return -EINVAL;
 
-	if (state)
-		crtc_state = drm_atomic_get_existing_crtc_state(state,
-								new_plane_state->crtc);
-	else /* Special case for asynchronous cursor updates. */
+	crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc);
+
+	/* Special case for asynchronous cursor updates. */
+	if (!crtc_state)
 		crtc_state = plane->crtc->state;
 
 	return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
@@ -1265,6 +1265,8 @@ static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb,
 	struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb);
 	struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
 
+	if (WARN_ON(!bo))
+		return -EINVAL;
 	return drm_gem_handle_create(file_priv, &bo->tbo.base, handle);
 }
 
@@ -886,8 +886,8 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
 	if (WARN_ON(!xe_bo_is_pinned(bo)))
 		return -EINVAL;
 
-	if (WARN_ON(!xe_bo_is_vram(bo)))
-		return -EINVAL;
+	if (!xe_bo_is_vram(bo))
+		return 0;
 
 	ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
 	if (ret)
@@ -937,6 +937,7 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
 		.interruptible = false,
 	};
 	struct ttm_resource *new_mem;
+	struct ttm_place *place = &bo->placements[0];
 	int ret;
 
 	xe_bo_assert_held(bo);
@@ -947,9 +948,15 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
 	if (WARN_ON(!xe_bo_is_pinned(bo)))
 		return -EINVAL;
 
-	if (WARN_ON(xe_bo_is_vram(bo) || !bo->ttm.ttm))
+	if (WARN_ON(xe_bo_is_vram(bo)))
 		return -EINVAL;
 
+	if (WARN_ON(!bo->ttm.ttm && !xe_bo_is_stolen(bo)))
+		return -EINVAL;
+
+	if (!mem_type_is_vram(place->mem_type))
+		return 0;
+
 	ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
 	if (ret)
 		return ret;
@@ -1719,6 +1726,7 @@ int xe_bo_pin_external(struct xe_bo *bo)
 
 int xe_bo_pin(struct xe_bo *bo)
 {
+	struct ttm_place *place = &bo->placements[0];
 	struct xe_device *xe = xe_bo_device(bo);
 	int err;
 
@@ -1749,21 +1757,21 @@ int xe_bo_pin(struct xe_bo *bo)
 	 */
 	if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
	    bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
-		struct ttm_place *place = &(bo->placements[0]);
-
 		if (mem_type_is_vram(place->mem_type)) {
 			xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);
 
 			place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
 				       vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
 			place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
-
-			spin_lock(&xe->pinned.lock);
-			list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
-			spin_unlock(&xe->pinned.lock);
 		}
 	}
 
+	if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
+		spin_lock(&xe->pinned.lock);
+		list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
+		spin_unlock(&xe->pinned.lock);
+	}
+
 	ttm_bo_pin(&bo->ttm);
 
 	/*
@@ -1809,23 +1817,18 @@ void xe_bo_unpin_external(struct xe_bo *bo)
 
 void xe_bo_unpin(struct xe_bo *bo)
 {
+	struct ttm_place *place = &bo->placements[0];
 	struct xe_device *xe = xe_bo_device(bo);
 
 	xe_assert(xe, !bo->ttm.base.import_attach);
 	xe_assert(xe, xe_bo_is_pinned(bo));
 
-	if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
-	    bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
-		struct ttm_place *place = &(bo->placements[0]);
-
-		if (mem_type_is_vram(place->mem_type)) {
-			spin_lock(&xe->pinned.lock);
-			xe_assert(xe, !list_empty(&bo->pinned_link));
-			list_del_init(&bo->pinned_link);
-			spin_unlock(&xe->pinned.lock);
-		}
+	if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
+		spin_lock(&xe->pinned.lock);
+		xe_assert(xe, !list_empty(&bo->pinned_link));
+		list_del_init(&bo->pinned_link);
+		spin_unlock(&xe->pinned.lock);
 	}
 
 	ttm_bo_unpin(&bo->ttm);
 }
 
@@ -34,14 +34,22 @@ int xe_bo_evict_all(struct xe_device *xe)
 	u8 id;
 	int ret;
 
-	if (!IS_DGFX(xe))
-		return 0;
-
 	/* User memory */
-	for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
+	for (mem_type = XE_PL_TT; mem_type <= XE_PL_VRAM1; ++mem_type) {
 		struct ttm_resource_manager *man =
 			ttm_manager_type(bdev, mem_type);
 
+		/*
+		 * On igpu platforms with flat CCS we need to ensure we save and restore any CCS
+		 * state since this state lives inside graphics stolen memory which doesn't survive
+		 * hibernation.
+		 *
+		 * This can be further improved by only evicting objects that we know have actually
+		 * used a compression enabled PAT index.
+		 */
+		if (mem_type == XE_PL_TT && (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe)))
+			continue;
+
 		if (man) {
 			ret = ttm_resource_manager_evict_all(bdev, man);
 			if (ret)
@@ -125,9 +133,6 @@ int xe_bo_restore_kernel(struct xe_device *xe)
 	struct xe_bo *bo;
 	int ret;
 
-	if (!IS_DGFX(xe))
-		return 0;
-
 	spin_lock(&xe->pinned.lock);
 	for (;;) {
 		bo = list_first_entry_or_null(&xe->pinned.evicted,
@@ -159,7 +164,6 @@ int xe_bo_restore_kernel(struct xe_device *xe)
 		 * should setup the iosys map.
 		 */
 		xe_assert(xe, !iosys_map_is_null(&bo->vmap));
-		xe_assert(xe, xe_bo_is_vram(bo));
 
 		xe_bo_put(bo);
 
@@ -203,14 +203,14 @@ retry:
 		write_locked = false;
 	}
 	if (err)
-		goto err_syncs;
+		goto err_hw_exec_mode;
 
 	if (write_locked) {
 		err = xe_vm_userptr_pin(vm);
 		downgrade_write(&vm->lock);
 		write_locked = false;
 		if (err)
-			goto err_hw_exec_mode;
+			goto err_unlock_list;
 	}
 
 	if (!args->num_batch_buffer) {
@@ -1206,9 +1206,11 @@ static int xe_oa_release(struct inode *inode, struct file *file)
 	struct xe_oa_stream *stream = file->private_data;
 	struct xe_gt *gt = stream->gt;
 
+	xe_pm_runtime_get(gt_to_xe(gt));
 	mutex_lock(&gt->oa.gt_lock);
 	xe_oa_destroy_locked(stream);
 	mutex_unlock(&gt->oa.gt_lock);
+	xe_pm_runtime_put(gt_to_xe(gt));
 
 	/* Release the reference the OA stream kept on the driver */
 	drm_dev_put(&gt_to_xe(gt)->drm);
@@ -269,8 +269,6 @@ rdma_find_ndev_for_src_ip_rcu(struct net *net, const struct sockaddr *src_in)
 		break;
 #endif
 	}
-	if (!ret && dev && is_vlan_dev(dev))
-		dev = vlan_dev_real_dev(dev);
 	return ret ? ERR_PTR(ret) : dev;
 }
 
@@ -300,9 +300,6 @@ static void bnxt_re_shutdown(struct auxiliary_device *adev)
 	struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
 	struct bnxt_re_dev *rdev;
 
-	if (!en_info)
-		return;
-
 	rdev = en_info->rdev;
 	ib_unregister_device(&rdev->ibdev);
 	bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
@@ -316,9 +313,6 @@ static void bnxt_re_stop_irq(void *handle)
 	struct bnxt_qplib_nq *nq;
 	int indx;
 
-	if (!en_info)
-		return;
-
 	rdev = en_info->rdev;
 	rcfw = &rdev->rcfw;
 
@@ -339,9 +333,6 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
 	struct bnxt_qplib_nq *nq;
 	int indx, rc;
 
-	if (!en_info)
-		return;
-
 	rdev = en_info->rdev;
 	msix_ent = rdev->en_dev->msix_entries;
 	rcfw = &rdev->rcfw;
@@ -1991,10 +1982,6 @@ static void bnxt_re_remove(struct auxiliary_device *adev)
 	struct bnxt_re_dev *rdev;
 
 	mutex_lock(&bnxt_re_mutex);
-	if (!en_info) {
-		mutex_unlock(&bnxt_re_mutex);
-		return;
-	}
 	rdev = en_info->rdev;
 
 	if (rdev)
@@ -2025,7 +2012,15 @@ static int bnxt_re_probe(struct auxiliary_device *adev,
 	auxiliary_set_drvdata(adev, en_info);
 
 	rc = bnxt_re_add_device(adev, BNXT_RE_COMPLETE_INIT);
+	if (rc)
+		goto err;
 	mutex_unlock(&bnxt_re_mutex);
+	return 0;
+
+err:
+	mutex_unlock(&bnxt_re_mutex);
+	kfree(en_info);
 
 	return rc;
 }
@@ -2035,9 +2030,6 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
 	struct bnxt_en_dev *en_dev;
 	struct bnxt_re_dev *rdev;
 
-	if (!en_info)
-		return 0;
-
 	rdev = en_info->rdev;
 	en_dev = en_info->en_dev;
 	mutex_lock(&bnxt_re_mutex);
@@ -2082,9 +2074,6 @@ static int bnxt_re_resume(struct auxiliary_device *adev)
 	struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
 	struct bnxt_re_dev *rdev;
 
-	if (!en_info)
-		return 0;
-
 	mutex_lock(&bnxt_re_mutex);
 	/* L2 driver may invoke this callback during device recovery, resume.
 	 * reset. Current RoCE driver doesn't recover the device in case of
@@ -138,7 +138,7 @@ static int qcom_cpucp_mbox_probe(struct platform_device *pdev)
 		return irq;
 
 	ret = devm_request_irq(dev, irq, qcom_cpucp_mbox_irq_fn,
-			       IRQF_TRIGGER_HIGH, "apss_cpucp_mbox", cpucp);
+			       IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND, "apss_cpucp_mbox", cpucp);
 	if (ret < 0)
 		return dev_err_probe(dev, ret, "Failed to register irq: %d\n", irq);
 
@@ -2957,8 +2957,8 @@ static int dw_mci_init_slot(struct dw_mci *host)
 	if (host->use_dma == TRANS_MODE_IDMAC) {
 		mmc->max_segs = host->ring_size;
 		mmc->max_blk_size = 65535;
-		mmc->max_req_size = DW_MCI_DESC_DATA_LENGTH * host->ring_size;
-		mmc->max_seg_size = mmc->max_req_size;
+		mmc->max_seg_size = 0x1000;
+		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
 		mmc->max_blk_count = mmc->max_req_size / 512;
 	} else if (host->use_dma == TRANS_MODE_EDMAC) {
 		mmc->max_segs = 64;
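The new limits keep the invariant max_req_size = max_seg_size * ring_size with a fixed 4 KiB segment, so each IDMAC descriptor covers at most one page. Worked through for a hypothetical ring_size of 128 (the actual value is hardware-dependent):

    mmc->max_seg_size  = 0x1000;                               /* 4096 bytes */
    mmc->max_req_size  = mmc->max_seg_size * host->ring_size;  /* 524288     */
    mmc->max_blk_count = mmc->max_req_size / 512;              /* 1024 blocks */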
@@ -1191,10 +1191,9 @@ static const struct sunxi_mmc_cfg sun50i_a64_emmc_cfg = {
 	.needs_new_timings = true,
 };
 
-static const struct sunxi_mmc_cfg sun50i_a100_cfg = {
+static const struct sunxi_mmc_cfg sun50i_h616_cfg = {
 	.idma_des_size_bits = 16,
 	.idma_des_shift = 2,
-	.clk_delays = NULL,
 	.can_calibrate = true,
 	.mask_data0 = true,
 	.needs_new_timings = true,
@@ -1217,8 +1216,9 @@ static const struct of_device_id sunxi_mmc_of_match[] = {
 	{ .compatible = "allwinner,sun20i-d1-mmc", .data = &sun20i_d1_cfg },
 	{ .compatible = "allwinner,sun50i-a64-mmc", .data = &sun50i_a64_cfg },
 	{ .compatible = "allwinner,sun50i-a64-emmc", .data = &sun50i_a64_emmc_cfg },
-	{ .compatible = "allwinner,sun50i-a100-mmc", .data = &sun50i_a100_cfg },
+	{ .compatible = "allwinner,sun50i-a100-mmc", .data = &sun20i_d1_cfg },
 	{ .compatible = "allwinner,sun50i-a100-emmc", .data = &sun50i_a100_emmc_cfg },
+	{ .compatible = "allwinner,sun50i-h616-mmc", .data = &sun50i_h616_cfg },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
@@ -125,7 +125,8 @@ static int scmi_perf_domain_probe(struct scmi_device *sdev)
 		scmi_pd->ph = ph;
 		scmi_pd->genpd.name = scmi_pd->info->name;
 		scmi_pd->genpd.flags = GENPD_FLAG_ALWAYS_ON |
-				       GENPD_FLAG_OPP_TABLE_FW;
+				       GENPD_FLAG_OPP_TABLE_FW |
+				       GENPD_FLAG_DEV_NAME_FW;
 		scmi_pd->genpd.set_performance_state = scmi_pd_set_perf_state;
 		scmi_pd->genpd.attach_dev = scmi_pd_attach_dev;
 		scmi_pd->genpd.detach_dev = scmi_pd_detach_dev;
@@ -7,6 +7,7 @@
 #define pr_fmt(fmt) "PM: " fmt
 
 #include <linux/delay.h>
+#include <linux/idr.h>
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
@@ -23,6 +24,9 @@
 #include <linux/cpu.h>
 #include <linux/debugfs.h>
 
+/* Provides a unique ID for each genpd device */
+static DEFINE_IDA(genpd_ida);
+
 #define GENPD_RETRY_MAX_MS	250	/* Approximate */
 
 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)	\
@@ -171,6 +175,7 @@ static const struct genpd_lock_ops genpd_raw_spin_ops = {
 #define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
 #define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
 #define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
+#define genpd_is_dev_name_fw(genpd)	(genpd->flags & GENPD_FLAG_DEV_NAME_FW)
 
 static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
 		const struct generic_pm_domain *genpd)
@@ -189,7 +194,7 @@ static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
 
 	if (ret)
 		dev_warn_once(dev, "PM domain %s will not be powered off\n",
-			      genpd->name);
+			      dev_name(&genpd->dev));
 
 	return ret;
 }
@@ -274,7 +279,7 @@ static void genpd_debug_remove(struct generic_pm_domain *genpd)
 	if (!genpd_debugfs_dir)
 		return;
 
-	debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
+	debugfs_lookup_and_remove(dev_name(&genpd->dev), genpd_debugfs_dir);
 }
 
 static void genpd_update_accounting(struct generic_pm_domain *genpd)
@@ -731,7 +736,7 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
 	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
 	genpd->gd->max_off_time_changed = true;
 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
-		 genpd->name, "on", elapsed_ns);
+		 dev_name(&genpd->dev), "on", elapsed_ns);
 
 out:
 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
@@ -782,7 +787,7 @@ static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
 	genpd->gd->max_off_time_changed = true;
 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
-		 genpd->name, "off", elapsed_ns);
+		 dev_name(&genpd->dev), "off", elapsed_ns);
 
 out:
 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
@@ -1940,7 +1945,7 @@ int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
 
 	if (ret) {
 		dev_warn(dev, "failed to add notifier for PM domain %s\n",
-			 genpd->name);
+			 dev_name(&genpd->dev));
 		return ret;
 	}
 
@@ -1987,7 +1992,7 @@ int dev_pm_genpd_remove_notifier(struct device *dev)
 
 	if (ret) {
 		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
-			 genpd->name);
+			 dev_name(&genpd->dev));
 		return ret;
 	}
 
@@ -2013,7 +2018,7 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
 	 */
 	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
 		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
-		     genpd->name, subdomain->name);
+		     dev_name(&genpd->dev), subdomain->name);
 		return -EINVAL;
 	}
 
@@ -2088,7 +2093,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 
 	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
 		pr_warn("%s: unable to remove subdomain %s\n",
-			genpd->name, subdomain->name);
+			dev_name(&genpd->dev), subdomain->name);
 		ret = -EBUSY;
 		goto out;
 	}
@@ -2225,6 +2230,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
 	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
 	genpd->device_count = 0;
 	genpd->provider = NULL;
+	genpd->device_id = -ENXIO;
 	genpd->has_provider = false;
 	genpd->accounting_time = ktime_get_mono_fast_ns();
 	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
@ -2265,7 +2271,18 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
		return ret;

	device_initialize(&genpd->dev);
	dev_set_name(&genpd->dev, "%s", genpd->name);

	if (!genpd_is_dev_name_fw(genpd)) {
		dev_set_name(&genpd->dev, "%s", genpd->name);
	} else {
		ret = ida_alloc(&genpd_ida, GFP_KERNEL);
		if (ret < 0) {
			put_device(&genpd->dev);
			return ret;
		}
		genpd->device_id = ret;
		dev_set_name(&genpd->dev, "%s_%u", genpd->name, genpd->device_id);
	}

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
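The hunk above reserves an IDA id only for firmware-named domains, records it in device_id, and (further down) returns it in genpd_remove(). For illustration, a minimal kernel-style sketch of that reserve/name/release pattern; my_ida and struct my_obj are hypothetical names, not part of this commit:

#include <linux/device.h>
#include <linux/idr.h>

static DEFINE_IDA(my_ida);

struct my_obj {
	struct device dev;
	int id;				/* -ENXIO while no id is held */
};

static int my_obj_set_name(struct my_obj *obj, const char *base)
{
	int id = ida_alloc(&my_ida, GFP_KERNEL);

	if (id < 0)
		return id;		/* nothing reserved, nothing to undo */

	obj->id = id;
	return dev_set_name(&obj->dev, "%s_%u", base, obj->id);
}

static void my_obj_drop_name(struct my_obj *obj)
{
	if (obj->id != -ENXIO) {	/* only free what was allocated */
		ida_free(&my_ida, obj->id);
		obj->id = -ENXIO;
	}
}

Keeping the "no id held" sentinel in the object is what lets the teardown path stay unconditional, exactly as genpd_remove() does below.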
@ -2287,13 +2304,13 @@ static int genpd_remove(struct generic_pm_domain *genpd)

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		pr_err("Provider present, unable to remove %s\n", dev_name(&genpd->dev));
		return -EBUSY;
	}

	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		pr_err("%s: unable to remove %s\n", __func__, dev_name(&genpd->dev));
		return -EBUSY;
	}

@ -2307,9 +2324,11 @@ static int genpd_remove(struct generic_pm_domain *genpd)
	genpd_unlock(genpd);
	genpd_debug_remove(genpd);
	cancel_work_sync(&genpd->power_off_work);
	if (genpd->device_id != -ENXIO)
		ida_free(&genpd_ida, genpd->device_id);
	genpd_free_data(genpd);

	pr_debug("%s: removed %s\n", __func__, genpd->name);
	pr_debug("%s: removed %s\n", __func__, dev_name(&genpd->dev));

	return 0;
}
@ -3272,12 +3291,12 @@ static int genpd_summary_one(struct seq_file *s,
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s %-30s %u", genpd->name, state, genpd->performance_state);
	seq_printf(s, "%-30s %-30s %u", dev_name(&genpd->dev), state, genpd->performance_state);

	/*
	 * Modifications on the list require holding locks on both
	 * parent and child, so we are safe.
	 * Also genpd->name is immutable.
	 * Also the device name is immutable.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (list_is_first(&link->parent_node, &genpd->parent_links))
@ -3502,7 +3521,7 @@ static void genpd_debug_add(struct generic_pm_domain *genpd)
	if (!genpd_debugfs_dir)
		return;

	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
	d = debugfs_create_dir(dev_name(&genpd->dev), genpd_debugfs_dir);

	debugfs_create_file("current_state", 0444,
			    d, genpd, &status_fops);
@ -313,7 +313,9 @@ static void imx93_blk_ctrl_remove(struct platform_device *pdev)

	of_genpd_del_provider(pdev->dev.of_node);

	for (i = 0; bc->onecell_data.num_domains; i++) {
	pm_runtime_disable(&pdev->dev);

	for (i = 0; i < bc->onecell_data.num_domains; i++) {
		struct imx93_blk_ctrl_domain *domain = &bc->domains[i];

		pm_genpd_remove(&domain->genpd);
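This hunk fixes two things: pm_runtime_disable() is hoisted out of the loop (it acts on the platform device, not on a domain, so once is enough), and the loop condition tested the count itself instead of comparing it against i, so any non-zero count looped, and indexed, forever. A standalone demo of the condition bug, with illustrative names only:

#include <stdio.h>

int main(void)
{
	int num_domains = 3;

	/*
	 * Buggy form: the condition is the constant count itself, so it
	 * stays true forever whenever the count is non-zero:
	 *
	 *	for (int i = 0; num_domains; i++)
	 *
	 * Fixed form, bounded by the element count:
	 */
	for (int i = 0; i < num_domains; i++)
		printf("removing domain %d\n", i);

	return 0;
}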
@ -298,7 +298,7 @@ static int comp_refs(struct btrfs_delayed_ref_node *ref1,
	if (ref1->ref_root < ref2->ref_root)
		return -1;
	if (ref1->ref_root > ref2->ref_root)
		return -1;
		return 1;
	if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY)
		ret = comp_data_refs(ref1, ref2);
}
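Before this fix, comp_refs() returned -1 for both orderings of ref_root, so the comparator was not antisymmetric and the refs' sort order was undefined whenever roots differed. A standalone demo of the three-way contract the fix restores, cmp(a, b) == -cmp(b, a), with qsort as the consumer (hypothetical names, any C compiler):

#include <stdio.h>
#include <stdlib.h>

static int cmp_u64(const void *a, const void *b)
{
	unsigned long long x = *(const unsigned long long *)a;
	unsigned long long y = *(const unsigned long long *)b;

	if (x < y)
		return -1;
	if (x > y)
		return 1;	/* the buggy version returned -1 here too */
	return 0;
}

int main(void)
{
	unsigned long long roots[] = { 7, 2, 9, 2 };

	qsort(roots, 4, sizeof(roots[0]), cmp_u64);
	for (int i = 0; i < 4; i++)
		printf("%llu\n", roots[i]);
	return 0;
}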
@ -771,13 +771,24 @@
	INTEL_ATS_M150_IDS(MACRO__, ## __VA_ARGS__), \
	INTEL_ATS_M75_IDS(MACRO__, ## __VA_ARGS__)

/* MTL */
#define INTEL_ARL_IDS(MACRO__, ...) \
	MACRO__(0x7D41, ## __VA_ARGS__), \
/* ARL */
#define INTEL_ARL_H_IDS(MACRO__, ...) \
	MACRO__(0x7D51, ## __VA_ARGS__), \
	MACRO__(0x7D67, ## __VA_ARGS__), \
	MACRO__(0x7DD1, ## __VA_ARGS__)

#define INTEL_ARL_U_IDS(MACRO__, ...) \
	MACRO__(0x7D41, ## __VA_ARGS__) \

#define INTEL_ARL_S_IDS(MACRO__, ...) \
	MACRO__(0x7D67, ## __VA_ARGS__), \
	MACRO__(0xB640, ## __VA_ARGS__)

#define INTEL_ARL_IDS(MACRO__, ...) \
	INTEL_ARL_H_IDS(MACRO__, ## __VA_ARGS__), \
	INTEL_ARL_U_IDS(MACRO__, ## __VA_ARGS__), \
	INTEL_ARL_S_IDS(MACRO__, ## __VA_ARGS__)

/* MTL */
#define INTEL_MTL_IDS(MACRO__, ...) \
	INTEL_ARL_IDS(MACRO__, ## __VA_ARGS__), \
	MACRO__(0x7D40, ## __VA_ARGS__), \
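These INTEL_*_IDS definitions are X-macro lists: each list holds device IDs and takes the per-entry macro as a parameter, so one list can expand into PCI match tables, quirk tables, and so on, and the new INTEL_ARL_IDS is just the concatenation of the H/U/S sub-lists. A standalone demo of the pattern, with made-up IDs and names rather than the real i915 tables:

#include <stdio.h>

#define DEMO_IDS(MACRO__, ...) \
	MACRO__(0x7D41, ##__VA_ARGS__), \
	MACRO__(0x7D51, ##__VA_ARGS__)

#define MAKE_ENTRY(id, flags) { (id), (flags) }

struct demo_pci_id {
	unsigned int device;
	unsigned long driver_data;
};

/* One ID list, expanded here into a match table; the same list could
 * be expanded with a different MACRO__ elsewhere. */
static const struct demo_pci_id demo_table[] = {
	DEMO_IDS(MAKE_ENTRY, 0x1),
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++)
		printf("0x%04X -> %lu\n", demo_table[i].device, demo_table[i].driver_data);
	return 0;
}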
@ -92,6 +92,10 @@ struct dev_pm_domain_list {
 * GENPD_FLAG_OPP_TABLE_FW:	The genpd provider supports performance states,
 *				but its corresponding OPP tables are not
 *				described in DT, but are given directly by FW.
 *
 * GENPD_FLAG_DEV_NAME_FW:	Instructs genpd to generate a unique device name
 *				using ida. It is used by genpd providers which
 *				get their genpd-names directly from FW.
 */
#define GENPD_FLAG_PM_CLK	 (1U << 0)
#define GENPD_FLAG_IRQ_SAFE	 (1U << 1)
@ -101,6 +105,7 @@ struct dev_pm_domain_list {
#define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5)
#define GENPD_FLAG_MIN_RESIDENCY (1U << 6)
#define GENPD_FLAG_OPP_TABLE_FW	 (1U << 7)
#define GENPD_FLAG_DEV_NAME_FW	 (1U << 8)

enum gpd_status {
	GENPD_STATE_ON = 0,	/* PM domain is on */
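A sketch of how a provider whose domain names come straight from firmware might opt in to the new flag; my_provider_add_domain and fw_name are hypothetical illustration names, not taken from this commit:

#include <linux/pm_domain.h>

static int my_provider_add_domain(struct generic_pm_domain *genpd,
				  const char *fw_name)
{
	genpd->name = fw_name;	/* firmware-provided, possibly non-unique */

	/* Ask pm_genpd_init() to append an IDA-based "_%u" suffix. */
	genpd->flags |= GENPD_FLAG_DEV_NAME_FW;

	return pm_genpd_init(genpd, NULL, false);
}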
@ -163,6 +168,7 @@ struct generic_pm_domain {
	atomic_t sd_count;	/* Number of subdomains with power "on" */
	enum gpd_status status;	/* Current state of the domain */
	unsigned int device_count;	/* Number of devices */
	unsigned int device_id;		/* unique device id */
	unsigned int suspended_count;	/* System suspend device counter */
	unsigned int prepared_count;	/* Suspend counter of prepared devices */
	unsigned int performance_state;	/* Aggregated max performance state */
@ -2645,7 +2645,7 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
	 * emitted in scx_next_task_picked().
	 */
	if (SCX_HAS_OP(cpu_acquire))
		SCX_CALL_OP(0, cpu_acquire, cpu_of(rq), NULL);
		SCX_CALL_OP(SCX_KF_REST, cpu_acquire, cpu_of(rq), NULL);
	rq->scx.cpu_released = false;
}

@ -7450,7 +7450,6 @@ static void alc287_alc1318_playback_pcm_hook(struct hda_pcm_stream *hinfo,
					     struct snd_pcm_substream *substream,
					     int action)
{
	alc_write_coef_idx(codec, 0x10, 0x8806); /* Change MLK to GPIO3 */
	switch (action) {
	case HDA_GEN_PCM_ACT_OPEN:
		alc_write_coefex_idx(codec, 0x5a, 0x00, 0x954f); /* write gpio3 to high */
@ -7464,7 +7463,6 @@ static void alc287_alc1318_playback_pcm_hook(struct hda_pcm_stream *hinfo,
static void alc287_s4_power_gpio3_default(struct hda_codec *codec)
{
	if (is_s4_suspend(codec)) {
		alc_write_coef_idx(codec, 0x10, 0x8806); /* Change MLK to GPIO3 */
		alc_write_coefex_idx(codec, 0x5a, 0x00, 0x554f); /* write gpio3 as default value */
	}
}
@ -7473,9 +7471,17 @@ static void alc287_fixup_lenovo_thinkpad_with_alc1318(struct hda_codec *codec,
					const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;
	static const struct coef_fw coefs[] = {
		WRITE_COEF(0x24, 0x0013), WRITE_COEF(0x25, 0x0000), WRITE_COEF(0x26, 0xC300),
		WRITE_COEF(0x28, 0x0001), WRITE_COEF(0x29, 0xb023),
		WRITE_COEF(0x24, 0x0013), WRITE_COEF(0x25, 0x0000), WRITE_COEF(0x26, 0xC301),
		WRITE_COEF(0x28, 0x0001), WRITE_COEF(0x29, 0xb023),
	};

	if (action != HDA_FIXUP_ACT_PRE_PROBE)
		return;
	alc_update_coef_idx(codec, 0x10, 1<<11, 1<<11);
	alc_process_coef_fw(codec, coefs);
	spec->power_hook = alc287_s4_power_gpio3_default;
	spec->gen.pcm_playback_hook = alc287_alc1318_playback_pcm_hook;
}
@ -10496,6 +10502,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
	SND_PCI_QUIRK(0x103c, 0x8b59, "HP Elite mt645 G7 Mobile Thin Client U89", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
	SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
	SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
	SND_PCI_QUIRK(0x103c, 0x8b5f, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
	SND_PCI_QUIRK(0x103c, 0x8b63, "HP Elite Dragonfly 13.5 inch G4", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
	SND_PCI_QUIRK(0x103c, 0x8b65, "HP ProBook 455 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
	SND_PCI_QUIRK(0x103c, 0x8b66, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
@ -11673,6 +11680,8 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
		{0x1a, 0x40000000}),
	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC2XX_FIXUP_HEADSET_MIC,
		{0x19, 0x40000000}),
	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1558, "Clevo", ALC2XX_FIXUP_HEADSET_MIC,
		{0x19, 0x40000000}),
	{}
};

@ -54,10 +54,17 @@ static int max9768_set_gpio(struct snd_kcontrol *kcontrol,
{
	struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
	struct max9768 *max9768 = snd_soc_component_get_drvdata(c);
	bool val = !ucontrol->value.integer.value[0];
	int ret;

	gpiod_set_value_cansleep(max9768->mute, !ucontrol->value.integer.value[0]);
	if (val != gpiod_get_value_cansleep(max9768->mute))
		ret = 1;
	else
		ret = 0;

	return 0;
	gpiod_set_value_cansleep(max9768->mute, val);

	return ret;
}

static const DECLARE_TLV_DB_RANGE(volume_tlv,
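The max9768 change restores the ALSA kcontrol contract: a put handler must return 1 when the control value actually changed (so the core emits a value-change event to userspace) and 0 when it did not, rather than always returning 0. A sketch of that contract using a cached state instead of a GPIO read-back; demo_priv and demo_apply_mute are hypothetical names:

#include <sound/core.h>
#include <sound/control.h>

struct demo_priv {
	bool muted;
};

static void demo_apply_mute(struct demo_priv *priv, bool mute)
{
	/* hypothetical hardware write */
}

static int demo_mute_put(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_value *ucontrol)
{
	struct demo_priv *priv = snd_kcontrol_chip(kcontrol);
	bool val = !ucontrol->value.integer.value[0];

	if (val == priv->muted)
		return 0;		/* unchanged: no event needed */

	priv->muted = val;
	demo_apply_mute(priv, val);
	return 1;			/* changed: core notifies listeners */
}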
@ -270,16 +270,19 @@ static enum graph_type __graph_get_type(struct device_node *lnk)

	if (of_node_name_eq(np, GRAPH_NODENAME_MULTI)) {
		ret = GRAPH_MULTI;
		fw_devlink_purge_absent_suppliers(&np->fwnode);
		goto out_put;
	}

	if (of_node_name_eq(np, GRAPH_NODENAME_DPCM)) {
		ret = GRAPH_DPCM;
		fw_devlink_purge_absent_suppliers(&np->fwnode);
		goto out_put;
	}

	if (of_node_name_eq(np, GRAPH_NODENAME_C2C)) {
		ret = GRAPH_C2C;
		fw_devlink_purge_absent_suppliers(&np->fwnode);
		goto out_put;
	}

@ -590,6 +590,14 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
		},
		.driver_data = (void *)(SOC_SDW_CODEC_SPKR),
	},
	{
		.callback = sof_sdw_quirk_cb,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0CF1")
		},
		.driver_data = (void *)(SOC_SDW_CODEC_SPKR),
	},
	{
		.callback = sof_sdw_quirk_cb,
		.matches = {
@ -324,7 +324,6 @@ YAMAHA_DEVICE(0x105a, NULL),
YAMAHA_DEVICE(0x105b, NULL),
YAMAHA_DEVICE(0x105c, NULL),
YAMAHA_DEVICE(0x105d, NULL),
YAMAHA_DEVICE(0x1718, "P-125"),
{
	USB_DEVICE(0x0499, 0x1503),
	QUIRK_DRIVER_INFO {
@ -391,6 +390,19 @@ YAMAHA_DEVICE(0x1718, "P-125"),
	}
},
{
	USB_DEVICE(0x0499, 0x1718),
	QUIRK_DRIVER_INFO {
		/* .vendor_name = "Yamaha", */
		/* .product_name = "P-125", */
		QUIRK_DATA_COMPOSITE {
			{ QUIRK_DATA_STANDARD_AUDIO(1) },
			{ QUIRK_DATA_STANDARD_AUDIO(2) },
			{ QUIRK_DATA_MIDI_YAMAHA(3) },
			QUIRK_COMPOSITE_END
		}
	}
},
YAMAHA_DEVICE(0x2000, "DGP-7"),
YAMAHA_DEVICE(0x2001, "DGP-5"),
YAMAHA_DEVICE(0x2002, NULL),