Merge branches 'msm-next-lumag-dpu' and 'msm-next-lumag-dsi' into msm-next-lumag
commit db22583d6c
@@ -21,6 +21,7 @@ properties:
- qcom,sc7280-edp
- qcom,sc8180x-dp
- qcom,sc8180x-edp
- qcom,sm8350-dp

reg:
items:

@@ -14,8 +14,9 @@ allOf:

properties:
compatible:
items:
- const: qcom,mdss-dsi-ctrl
enum:
- qcom,mdss-dsi-ctrl
- qcom,dsi-ctrl-6g-qcm2290

reg:
maxItems: 1

@@ -35,6 +35,38 @@ properties:
Connected to DSI0_MIPI_DSI_PLL_VDDA0P9 pin for sc7180 target and
connected to VDDA_MIPI_DSI_0_PLL_0P9 pin for sdm845 target

qcom,phy-rescode-offset-top:
$ref: /schemas/types.yaml#/definitions/int8-array
minItems: 5
maxItems: 5
description:
Integer array of offset for pull-up legs rescode for all five lanes.
To offset the drive strength from the calibrated value in an increasing
manner, -32 is the weakest and +31 is the strongest.
items:
minimum: -32
maximum: 31

qcom,phy-rescode-offset-bot:
$ref: /schemas/types.yaml#/definitions/int8-array
minItems: 5
maxItems: 5
description:
Integer array of offset for pull-down legs rescode for all five lanes.
To offset the drive strength from the calibrated value in a decreasing
manner, -32 is the weakest and +31 is the strongest.
items:
minimum: -32
maximum: 31

qcom,phy-drive-ldo-level:
$ref: "/schemas/types.yaml#/definitions/uint32"
description:
The PHY LDO has an amplitude tuning feature to adjust the LDO output
for the HSTX drive. Use supported levels (mV) to offset the drive level
from the default value.
enum: [ 375, 400, 425, 450, 475, 500 ]

required:
- compatible
- reg

@@ -64,5 +96,9 @@ examples:
clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
<&rpmhcc RPMH_CXO_CLK>;
clock-names = "iface", "ref";

qcom,phy-rescode-offset-top = /bits/ 8 <0 0 0 0 0>;
qcom,phy-rescode-offset-bot = /bits/ 8 <0 0 0 0 0>;
qcom,phy-drive-ldo-level = <400>;
};
...
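As a rough illustration of how the binding properties above translate into hardware values, here is a minimal C sketch (not part of this series; the example_* helper names are hypothetical): the LDO level is one of six 25 mV steps starting at 375 mV, and each per-lane rescode offset is a signed value packed into a 6-bit register field.

/*
 * Minimal sketch, not part of this series: encode the DT tuning values
 * described in the binding above. example_* names are hypothetical.
 */
#include <linux/errno.h>
#include <linux/types.h>

static int example_encode_ldo_level(u32 level_mv, u8 *level)
{
	/* Supported levels: 375, 400, 425, 450, 475, 500 mV. */
	if (level_mv < 375 || level_mv > 500 || (level_mv - 375) % 25)
		return -EINVAL;

	*level = (level_mv - 375) / 25;	/* 375 mV -> 0, ..., 500 mV -> 5 */
	return 0;
}

static int example_encode_rescode_offset(s8 offset, u8 *field)
{
	/* Offsets span -32..+31 and occupy a 6-bit register field. */
	if (offset < -32 || offset > 31)
		return -EINVAL;

	*field = offset & 0x3f;
	return 0;
}

The arithmetic mapping is equivalent to the switch-based lookup used by the PHY driver later in this diff.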
@@ -33,15 +33,6 @@ config DRM_MSM_GPU_STATE
depends on DRM_MSM && (DEBUG_FS || DEV_COREDUMP)
default y

config DRM_MSM_REGISTER_LOGGING
bool "MSM DRM register logging"
depends on DRM_MSM
default n
help
Compile in support for logging register reads/writes in a format
that can be parsed by envytools demsm tool. If enabled, register
logging can be switched on via msm.reglog=y module param.

config DRM_MSM_GPU_SUDO
bool "Enable SUDO flag on submits"
depends on DRM_MSM && EXPERT
@@ -86,6 +86,7 @@ msm-y := \
msm_gem_vma.o \
msm_gpu.o \
msm_gpu_devfreq.o \
msm_io_utils.o \
msm_iommu.o \
msm_perf.o \
msm_rd.o \
@@ -1506,7 +1506,7 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
if (a6xx_gpu->have_mmu500)
a6xx_gpu->llc_mmio = NULL;
else
a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem");

a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
@@ -227,7 +227,8 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
return aspace;
}

int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t *value)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

@@ -268,7 +269,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
*value = 0;
return 0;
case MSM_PARAM_FAULTS:
*value = gpu->global_faults;
*value = gpu->global_faults + ctx->aspace->faults;
return 0;
case MSM_PARAM_SUSPENDS:
*value = gpu->suspend_count;
@@ -279,7 +279,8 @@ static inline int adreno_is_a650_family(struct adreno_gpu *gpu)
adreno_is_a660_family(gpu);
}

int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
@@ -778,12 +778,6 @@ fail:
return ret;
}

static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder)
{
return rate;
}

static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
int i;

@@ -952,7 +946,6 @@ static const struct msm_kms_funcs kms_funcs = {
.disable_vblank = dpu_kms_disable_vblank,
.check_modified_format = dpu_format_check_modified_format,
.get_format = dpu_get_msm_format,
.round_pixclk = dpu_kms_round_pixclk,
.destroy = dpu_kms_destroy,
.snapshot = dpu_kms_mdp_snapshot,
#ifdef CONFIG_DEBUG_FS

@@ -1033,7 +1026,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)

atomic_set(&dpu_kms->bandwidth_ref, 0);

dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp");
if (IS_ERR(dpu_kms->mmio)) {
rc = PTR_ERR(dpu_kms->mmio);
DPU_ERROR("mdp register memory map failed: %d\n", rc);

@@ -1042,20 +1035,20 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
}
DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);

dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif");
if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
DPU_ERROR("vbif register memory map failed: %d\n", rc);
dpu_kms->vbif[VBIF_RT] = NULL;
goto error;
}
dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt");
if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
dpu_kms->vbif[VBIF_NRT] = NULL;
DPU_DEBUG("VBIF NRT is not defined");
}

dpu_kms->reg_dma = msm_ioremap_quiet(dpu_kms->pdev, "regdma", "regdma");
dpu_kms->reg_dma = msm_ioremap_quiet(dpu_kms->pdev, "regdma");
if (IS_ERR(dpu_kms->reg_dma)) {
dpu_kms->reg_dma = NULL;
DPU_DEBUG("REG_DMA is not defined");
@@ -214,7 +214,7 @@ int dpu_mdss_init(struct platform_device *pdev)
if (!dpu_mdss)
return -ENOMEM;

dpu_mdss->mmio = msm_ioremap(pdev, "mdss", "mdss");
dpu_mdss->mmio = msm_ioremap(pdev, "mdss");
if (IS_ERR(dpu_mdss->mmio))
return PTR_ERR(dpu_mdss->mmio);
@@ -418,7 +418,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)

mdp4_kms->dev = dev;

mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
mdp4_kms->mmio = msm_ioremap(pdev, NULL);
if (IS_ERR(mdp4_kms->mmio)) {
ret = PTR_ERR(mdp4_kms->mmio);
goto fail;
@@ -190,12 +190,6 @@ static void mdp5_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);
}

static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder)
{
return rate;
}

static int mdp5_set_split_display(struct msm_kms *kms,
struct drm_encoder *encoder,
struct drm_encoder *slave_encoder,

@@ -278,7 +272,6 @@ static const struct mdp_kms_funcs kms_funcs = {
.wait_flush = mdp5_wait_flush,
.complete_commit = mdp5_complete_commit,
.get_format = mdp_get_format,
.round_pixclk = mdp5_round_pixclk,
.set_split_display = mdp5_set_split_display,
.destroy = mdp5_kms_destroy,
#ifdef CONFIG_DEBUG_FS

@@ -827,7 +820,7 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
if (ret)
goto fail;

mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys");
if (IS_ERR(mdp5_kms->mmio)) {
ret = PTR_ERR(mdp5_kms->mmio);
goto fail;
@@ -210,13 +210,13 @@ int mdp5_mdss_init(struct platform_device *pdev)

mdp5_mdss->base.dev = &pdev->dev;

mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys");
if (IS_ERR(mdp5_mdss->mmio)) {
ret = PTR_ERR(mdp5_mdss->mmio);
goto fail;
}

mdp5_mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
mdp5_mdss->vbif = msm_ioremap(pdev, "vbif_phys");
if (IS_ERR(mdp5_mdss->vbif)) {
ret = PTR_ERR(mdp5_mdss->vbif);
goto fail;
@@ -68,7 +68,7 @@ static int smp_request_block(struct mdp5_smp *smp,
uint8_t reserved;

/* we shouldn't be requesting blocks for an in-use client: */
WARN_ON(bitmap_weight(cs, cnt) > 0);
WARN_ON(!bitmap_empty(cs, cnt));

reserved = smp->reserved[cid];
@@ -456,19 +456,19 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0);
}

int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog,
u32 pattern)
int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog,
u32 state_bit)
{
int bit, ret;
u32 data;
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);

bit = BIT(pattern - 1);
DRM_DEBUG_DP("hw: bit=%d train=%d\n", bit, pattern);
bit = BIT(state_bit - 1);
DRM_DEBUG_DP("hw: bit=%d train=%d\n", bit, state_bit);
dp_catalog_ctrl_state_ctrl(dp_catalog, bit);

bit = BIT(pattern - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
bit = BIT(state_bit - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;

/* Poll for mainlink ready status */
ret = readx_poll_timeout(readl, catalog->io->dp_controller.link.base +

@@ -476,7 +476,7 @@ int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog,
data, data & bit,
POLLING_SLEEP_US, POLLING_TIMEOUT_US);
if (ret < 0) {
DRM_ERROR("set pattern for link_train=%d failed\n", pattern);
DRM_ERROR("set state_bit for link_train=%d failed\n", state_bit);
return ret;
}
return 0;

@@ -94,7 +94,7 @@ void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable);
void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb);
void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate,
u32 stream_rate_khz, bool fixed_nvid);
int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog, u32 pattern);
int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, u32 pattern);
void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog);
bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable);
@@ -1083,7 +1083,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,

*training_step = DP_TRAINING_1;

ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, DP_TRAINING_PATTERN_1);
ret = dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, 1);
if (ret)
return ret;
dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |

@@ -1181,7 +1181,8 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
int *training_step)
{
int tries = 0, ret = 0;
char pattern;
u8 pattern;
u32 state_ctrl_bit;
int const maximum_retries = 5;
u8 link_status[DP_LINK_STATUS_SIZE];

@@ -1189,12 +1190,18 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,

*training_step = DP_TRAINING_2;

if (drm_dp_tps3_supported(ctrl->panel->dpcd))
if (drm_dp_tps4_supported(ctrl->panel->dpcd)) {
pattern = DP_TRAINING_PATTERN_4;
state_ctrl_bit = 4;
} else if (drm_dp_tps3_supported(ctrl->panel->dpcd)) {
pattern = DP_TRAINING_PATTERN_3;
else
state_ctrl_bit = 3;
} else {
pattern = DP_TRAINING_PATTERN_2;
state_ctrl_bit = 2;
}

ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, pattern);
ret = dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, state_ctrl_bit);
if (ret)
return ret;

@@ -1365,60 +1372,48 @@ static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl)
return ret;
}

int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset)
void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable)
{
struct dp_ctrl_private *ctrl;
struct dp_io *dp_io;
struct phy *phy;

if (!dp_ctrl) {
DRM_ERROR("Invalid input data\n");
return -EINVAL;
}

ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
dp_io = &ctrl->parser->io;
phy = dp_io->phy;

ctrl->dp_ctrl.orientation = flip;
dp_catalog_ctrl_reset(ctrl->catalog);

if (reset)
dp_catalog_ctrl_reset(ctrl->catalog);

DRM_DEBUG_DP("flip=%d\n", flip);
dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_init(phy);
dp_catalog_ctrl_enable_irq(ctrl->catalog, true);

return 0;
if (enable)
dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
}

/**
* dp_ctrl_host_deinit() - Uninitialize DP controller
* @dp_ctrl: Display Port Driver data
*
* Perform required steps to uninitialize DP controller
* and its resources.
*/
void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
struct dp_io *dp_io;
struct phy *phy;

if (!dp_ctrl) {
DRM_ERROR("Invalid input data\n");
return;
}
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
dp_io = &ctrl->parser->io;
phy = dp_io->phy;

dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_init(phy);
DRM_DEBUG_DP("phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
}

void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
struct dp_io *dp_io;
struct phy *phy;

ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
dp_io = &ctrl->parser->io;
phy = dp_io->phy;

dp_catalog_ctrl_enable_irq(ctrl->catalog, false);
dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_exit(phy);

DRM_DEBUG_DP("Host deinitialized successfully\n");
DRM_DEBUG_DP("phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
}

static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)

@@ -1488,8 +1483,13 @@ static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
}

phy_power_off(phy);
phy_exit(phy);

/* aux channel down, reinit phy */
phy_exit(phy);
phy_init(phy);

DRM_DEBUG_DP("phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
return 0;
}

@@ -1761,6 +1761,9 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
/* end with failure */
break; /* lane == 1 already */
}

/* stop link training before start re training */
dp_ctrl_clear_training_pattern(ctrl);
}
}

@@ -1893,33 +1896,20 @@ int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
return ret;
}

DRM_DEBUG_DP("Before, phy=%x init_count=%d power_on=%d\n",
(u32)(uintptr_t)phy, phy->init_count, phy->power_count);

phy_power_off(phy);

/* aux channel down, reinit phy */
phy_exit(phy);
phy_init(phy);

DRM_DEBUG_DP("DP off link/stream done\n");
DRM_DEBUG_DP("phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
return ret;
}

void dp_ctrl_off_phy(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
struct dp_io *dp_io;
struct phy *phy;

ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
dp_io = &ctrl->parser->io;
phy = dp_io->phy;

dp_catalog_ctrl_reset(ctrl->catalog);

phy_exit(phy);

DRM_DEBUG_DP("DP off phy done\n");
}

int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;

@@ -1948,9 +1938,9 @@ int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
}

phy_power_off(phy);
phy_exit(phy);
DRM_DEBUG_DP("phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);

DRM_DEBUG_DP("DP off done\n");
return ret;
}
@@ -19,12 +19,9 @@ struct dp_ctrl {
u32 pixel_rate;
};

int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset);
void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl);
int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
void dp_ctrl_off_phy(struct dp_ctrl *dp_ctrl);
int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
void dp_ctrl_isr(struct dp_ctrl *dp_ctrl);

@@ -34,4 +31,9 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
struct dp_power *power, struct dp_catalog *catalog,
struct dp_parser *parser);

void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable);
void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl);
void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl);
void dp_ctrl_irq_phy_exit(struct dp_ctrl *dp_ctrl);

#endif /* _DP_CTRL_H_ */
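A rough sketch of the call order implied by the new split (not part of this patch; the example_* wrappers are hypothetical, the dp_* helpers are the driver's own, as used in the dp_display.c hunks below): the core/IRQ path is brought up once, while the PHY is initialized and torn down separately as the link comes and goes.

/* Minimal sketch, assuming the dp_* helpers used elsewhere in this series. */
static void example_core_bringup(struct dp_display_private *dp)
{
	dp_power_init(dp->power, false);
	dp_ctrl_reset_irq_ctrl(dp->ctrl, true);	/* reset ctrl and enable irqs */
	dp_aux_init(dp->aux);
}

static void example_link_up(struct dp_display_private *dp)
{
	dp_ctrl_phy_init(dp->ctrl);		/* PHY only while a link is active */
}

static void example_core_teardown(struct dp_display_private *dp)
{
	dp_ctrl_reset_irq_ctrl(dp->ctrl, false);	/* reset only; do not enable irqs */
	dp_aux_deinit(dp->aux);
	dp_power_deinit(dp->power);
}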
@@ -207,39 +207,39 @@ static const struct file_operations test_active_fops = {
.write = dp_test_active_write
};

static int dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor)
static void dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor)
{
int rc = 0;
char path[64];
struct dp_debug_private *debug = container_of(dp_debug,
struct dp_debug_private, dp_debug);

debugfs_create_file("dp_debug", 0444, minor->debugfs_root,
snprintf(path, sizeof(path), "msm_dp-%s", debug->connector->name);

debug->root = debugfs_create_dir(path, minor->debugfs_root);

debugfs_create_file("dp_debug", 0444, debug->root,
debug, &dp_debug_fops);

debugfs_create_file("msm_dp_test_active", 0444,
minor->debugfs_root,
debug->root,
debug, &test_active_fops);

debugfs_create_file("msm_dp_test_data", 0444,
minor->debugfs_root,
debug->root,
debug, &dp_test_data_fops);

debugfs_create_file("msm_dp_test_type", 0444,
minor->debugfs_root,
debug->root,
debug, &dp_test_type_fops);

debug->root = minor->debugfs_root;

return rc;
}

struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_usbpd *usbpd, struct dp_link *link,
struct drm_connector *connector, struct drm_minor *minor)
{
int rc = 0;
struct dp_debug_private *debug;
struct dp_debug *dp_debug;
int rc;

if (!dev || !panel || !usbpd || !link) {
DRM_ERROR("invalid input\n");

@@ -266,11 +266,7 @@ struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
dp_debug->hdisplay = 0;
dp_debug->vrefresh = 0;

rc = dp_debug_init(dp_debug, minor);
if (rc) {
devm_kfree(dev, debug);
goto error;
}
dp_debug_init(dp_debug, minor);

return dp_debug;
error:
@@ -83,6 +83,7 @@ struct dp_display_private {

/* state variables */
bool core_initialized;
bool phy_initialized;
bool hpd_irq_on;
bool audio_supported;

@@ -143,10 +144,29 @@ static const struct msm_dp_config sc7280_dp_cfg = {
.num_descs = 2,
};

static const struct msm_dp_config sc8180x_dp_cfg = {
.descs = (const struct msm_dp_desc[]) {
[MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
[MSM_DP_CONTROLLER_1] = { .io_start = 0x0ae98000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
[MSM_DP_CONTROLLER_2] = { .io_start = 0x0ae9a000, .connector_type = DRM_MODE_CONNECTOR_eDP },
},
.num_descs = 3,
};

static const struct msm_dp_config sm8350_dp_cfg = {
.descs = (const struct msm_dp_desc[]) {
[MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
},
.num_descs = 1,
};

static const struct of_device_id dp_dt_match[] = {
{ .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_cfg },
{ .compatible = "qcom,sc7280-dp", .data = &sc7280_dp_cfg },
{ .compatible = "qcom,sc7280-edp", .data = &sc7280_dp_cfg },
{ .compatible = "qcom,sc8180x-dp", .data = &sc8180x_dp_cfg },
{ .compatible = "qcom,sc8180x-edp", .data = &sc8180x_dp_cfg },
{ .compatible = "qcom,sm8350-dp", .data = &sm8350_dp_cfg },
{}
};

@@ -246,7 +266,7 @@ static int dp_display_bind(struct device *dev, struct device *master,
goto end;
}

dp->dp_display.panel_bridge = dp->parser->panel_bridge;
dp->dp_display.next_bridge = dp->parser->next_bridge;

dp->aux->drm_dev = drm;
rc = dp_aux_register(dp->aux);

@@ -372,36 +392,51 @@ end:
return rc;
}

static void dp_display_host_init(struct dp_display_private *dp, int reset)
static void dp_display_host_phy_init(struct dp_display_private *dp)
{
bool flip = false;
DRM_DEBUG_DP("type=%d core_init=%d phy_init=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);

DRM_DEBUG_DP("core_initialized=%d\n", dp->core_initialized);
if (dp->core_initialized) {
DRM_DEBUG_DP("DP core already initialized\n");
return;
if (!dp->phy_initialized) {
dp_ctrl_phy_init(dp->ctrl);
dp->phy_initialized = true;
}
}

if (dp->usbpd->orientation == ORIENTATION_CC2)
flip = true;
static void dp_display_host_phy_exit(struct dp_display_private *dp)
{
DRM_DEBUG_DP("type=%d core_init=%d phy_init=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);

dp_power_init(dp->power, flip);
dp_ctrl_host_init(dp->ctrl, flip, reset);
if (dp->phy_initialized) {
dp_ctrl_phy_exit(dp->ctrl);
dp->phy_initialized = false;
}
}

static void dp_display_host_init(struct dp_display_private *dp)
{
DRM_DEBUG_DP("type=%d core_init=%d phy_init=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);

dp_power_init(dp->power, false);
dp_ctrl_reset_irq_ctrl(dp->ctrl, true);
dp_aux_init(dp->aux);
dp->core_initialized = true;
}

static void dp_display_host_deinit(struct dp_display_private *dp)
{
if (!dp->core_initialized) {
DRM_DEBUG_DP("DP core not initialized\n");
return;
}
DRM_DEBUG_DP("type=%d core_init=%d phy_init=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);

dp_ctrl_host_deinit(dp->ctrl);
dp_ctrl_reset_irq_ctrl(dp->ctrl, false);
dp_aux_deinit(dp->aux);
dp_power_deinit(dp->power);

dp->core_initialized = false;
}

@@ -409,7 +444,7 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
{
struct dp_display_private *dp = dev_get_dp_display_private(dev);

dp_display_host_init(dp, false);
dp_display_host_phy_init(dp);

return dp_display_process_hpd_high(dp);
}

@@ -507,7 +542,9 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
mutex_lock(&dp->event_mutex);

state = dp->hpd_state;
DRM_DEBUG_DP("hpd_state=%d\n", state);
DRM_DEBUG_DP("Before, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);

if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
mutex_unlock(&dp->event_mutex);
return 0;

@@ -530,11 +567,6 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
ret = dp_display_usbpd_configure_cb(&dp->pdev->dev);
if (ret) { /* link train failed */
dp->hpd_state = ST_DISCONNECTED;

if (ret == -ECONNRESET) { /* cable unplugged */
dp->core_initialized = false;
}

} else {
/* start sentinel checking in case of missing uevent */
dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout);

@@ -544,6 +576,8 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
dp_catalog_hpd_config_intr(dp->catalog,
DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, true);

DRM_DEBUG_DP("After, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
mutex_unlock(&dp->event_mutex);

/* uevent will complete connection part */

@@ -560,8 +594,10 @@ static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data)
mutex_lock(&dp->event_mutex);

state = dp->hpd_state;
if (state == ST_CONNECT_PENDING)
if (state == ST_CONNECT_PENDING) {
dp->hpd_state = ST_CONNECTED;
DRM_DEBUG_DP("type=%d\n", dp->dp_display.connector_type);
}

mutex_unlock(&dp->event_mutex);

@@ -594,6 +630,9 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)

state = dp->hpd_state;

DRM_DEBUG_DP("Before, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);

/* disable irq_hpd/replug interrupts */
dp_catalog_hpd_config_intr(dp->catalog,
DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, false);

@@ -604,8 +643,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
if (state == ST_DISCONNECTED) {
/* triggered by irq_hdp with sink_count = 0 */
if (dp->link->sink_count == 0) {
dp_ctrl_off_phy(dp->ctrl);
dp->core_initialized = false;
dp_display_host_phy_exit(dp);
}
mutex_unlock(&dp->event_mutex);
return 0;

@@ -637,13 +675,15 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
/* start sentinel checking in case of missing uevent */
dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);

DRM_DEBUG_DP("hpd_state=%d\n", state);
/* signal the disconnect event early to ensure proper teardown */
dp_display_handle_plugged_change(&dp->dp_display, false);

/* enable HDP plug interrupt to prepare for next plugin */
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, true);

DRM_DEBUG_DP("After, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);

/* uevent will complete disconnection part */
mutex_unlock(&dp->event_mutex);
return 0;

@@ -656,8 +696,10 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data
mutex_lock(&dp->event_mutex);

state = dp->hpd_state;
if (state == ST_DISCONNECT_PENDING)
if (state == ST_DISCONNECT_PENDING) {
dp->hpd_state = ST_DISCONNECTED;
DRM_DEBUG_DP("type=%d\n", dp->dp_display.connector_type);
}

mutex_unlock(&dp->event_mutex);

@@ -667,12 +709,14 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
{
u32 state;
int ret;

mutex_lock(&dp->event_mutex);

/* irq_hpd can happen at either connected or disconnected state */
state = dp->hpd_state;
DRM_DEBUG_DP("Before, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);

if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
mutex_unlock(&dp->event_mutex);
return 0;

@@ -692,17 +736,10 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
return 0;
}

/*
* dp core (ahb/aux clks) must be initialized before
* irq_hpd be handled
*/
if (dp->core_initialized) {
ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
if (ret == -ECONNRESET) { /* cable unplugged */
dp->core_initialized = false;
}
}
DRM_DEBUG_DP("hpd_state=%d\n", state);
dp_display_usbpd_attention_cb(&dp->pdev->dev);

DRM_DEBUG_DP("After, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);

mutex_unlock(&dp->event_mutex);
@@ -892,12 +929,19 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)

dp_display->audio_enabled = false;

/* triggered by irq_hpd with sink_count = 0 */
if (dp->link->sink_count == 0) {
/*
* irq_hpd with sink_count = 0
* hdmi unplugged out of dongle
*/
dp_ctrl_off_link_stream(dp->ctrl);
} else {
/*
* unplugged interrupt
* dongle unplugged out of DUT
*/
dp_ctrl_off(dp->ctrl);
dp->core_initialized = false;
dp_display_host_phy_exit(dp);
}

dp_display->power_on = false;

@@ -1027,7 +1071,7 @@ void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
static void dp_display_config_hpd(struct dp_display_private *dp)
{

dp_display_host_init(dp, true);
dp_display_host_init(dp);
dp_catalog_ctrl_hpd_config(dp->catalog);

/* Enable interrupt first time

@@ -1145,8 +1189,9 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)

hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog);

DRM_DEBUG_DP("hpd isr status=%#x\n", hpd_isr_status);
if (hpd_isr_status & 0x0F) {
DRM_DEBUG_DP("type=%d isr=0x%x\n",
dp->dp_display.connector_type, hpd_isr_status);
/* hpd related interrupts */
if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK)
dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);

@@ -1299,27 +1344,31 @@ static int dp_pm_resume(struct device *dev)

mutex_lock(&dp->event_mutex);

DRM_DEBUG_DP("Before, core_inited=%d power_on=%d\n",
dp->core_initialized, dp_display->power_on);
DRM_DEBUG_DP("Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized, dp_display->power_on);

/* start from disconnected state */
dp->hpd_state = ST_DISCONNECTED;

/* turn on dp ctrl/phy */
dp_display_host_init(dp, true);
dp_display_host_init(dp);

dp_catalog_ctrl_hpd_config(dp->catalog);

/*
* set sink to normal operation mode -- D0
* before dpcd read
*/
dp_link_psm_config(dp->link, &dp->panel->link_info, false);

if (dp_catalog_link_is_connected(dp->catalog)) {
/*
* set sink to normal operation mode -- D0
* before dpcd read
*/
dp_display_host_phy_init(dp);
dp_link_psm_config(dp->link, &dp->panel->link_info, false);
sink_count = drm_dp_read_sink_count(dp->aux);
if (sink_count < 0)
sink_count = 0;

dp_display_host_phy_exit(dp);
}

dp->link->sink_count = sink_count;

@@ -1336,9 +1385,11 @@ static int dp_pm_resume(struct device *dev)
dp_display_handle_plugged_change(dp_display, false);
}

DRM_DEBUG_DP("After, sink_count=%d is_connected=%d core_inited=%d power_on=%d\n",
dp->link->sink_count, dp->dp_display.is_connected,
dp->core_initialized, dp_display->power_on);
DRM_DEBUG_DP("After, type=%d sink_count=%d is_connected=%d \
core_inited=%d phy_inited=%d power_on=%d\n",
dp->dp_display.connector_type, dp->link->sink_count,
dp->dp_display.is_connected, dp->core_initialized,
dp->phy_initialized, dp_display->power_on);

mutex_unlock(&dp->event_mutex);

@@ -1355,24 +1406,24 @@ static int dp_pm_suspend(struct device *dev)

mutex_lock(&dp->event_mutex);

DRM_DEBUG_DP("Before, core_inited=%d power_on=%d\n",
dp->core_initialized, dp_display->power_on);
DRM_DEBUG_DP("Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized, dp_display->power_on);

if (dp->core_initialized == true) {
/* mainlink enabled */
if (dp_power_clk_status(dp->power, DP_CTRL_PM))
dp_ctrl_off_link_stream(dp->ctrl);
/* mainlink enabled */
if (dp_power_clk_status(dp->power, DP_CTRL_PM))
dp_ctrl_off_link_stream(dp->ctrl);

dp_display_host_deinit(dp);
}
dp_display_host_phy_exit(dp);

/* host_init will be called at pm_resume */
dp_display_host_deinit(dp);

dp->hpd_state = ST_SUSPENDED;

/* host_init will be called at pm_resume */
dp->core_initialized = false;

DRM_DEBUG_DP("After, core_inited=%d power_on=%d\n",
dp->core_initialized, dp_display->power_on);
DRM_DEBUG_DP("After, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized, dp_display->power_on);

mutex_unlock(&dp->event_mutex);

@@ -1460,6 +1511,7 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder)
{
struct msm_drm_private *priv;
struct dp_display_private *dp_priv;
int ret;

if (WARN_ON(!encoder) || WARN_ON(!dp_display) || WARN_ON(!dev))

@@ -1468,6 +1520,8 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
priv = dev->dev_private;
dp_display->drm_dev = dev;

dp_priv = container_of(dp_display, struct dp_display_private, dp_display);

ret = dp_display_request_irq(dp_display);
if (ret) {
DRM_ERROR("request_irq failed, ret=%d\n", ret);

@@ -1485,6 +1539,8 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
return ret;
}

dp_priv->panel->connector = dp_display->connector;

priv->connectors[priv->num_connectors++] = dp_display->connector;

dp_display->bridge = msm_dp_bridge_init(dp_display, dev, encoder);

@@ -1535,7 +1591,7 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
state = dp_display->hpd_state;

if (state == ST_DISPLAY_OFF)
dp_display_host_init(dp_display, true);
dp_display_host_phy_init(dp_display);

dp_display_enable(dp_display, 0);
@@ -16,7 +16,7 @@ struct msm_dp {
struct drm_bridge *bridge;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_bridge *panel_bridge;
struct drm_bridge *next_bridge;
bool is_connected;
bool audio_enabled;
bool power_on;
@@ -169,16 +169,6 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display)

drm_connector_attach_encoder(connector, dp_display->encoder);

if (dp_display->panel_bridge) {
ret = drm_bridge_attach(dp_display->encoder,
dp_display->panel_bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0) {
DRM_ERROR("failed to attach panel bridge: %d\n", ret);
return ERR_PTR(ret);
}
}

return connector;
}

@@ -246,5 +236,16 @@ struct drm_bridge *msm_dp_bridge_init(struct msm_dp *dp_display, struct drm_devi
return ERR_PTR(rc);
}

if (dp_display->next_bridge) {
rc = drm_bridge_attach(dp_display->encoder,
dp_display->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (rc < 0) {
DRM_ERROR("failed to attach panel bridge: %d\n", rc);
drm_bridge_remove(bridge);
return ERR_PTR(rc);
}
}

return bridge;
}
@@ -212,6 +212,11 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
if (drm_add_modes_noedid(connector, 640, 480))
drm_set_preferred_mode(connector, 640, 480);
mutex_unlock(&connector->dev->mode_config.mutex);
} else {
/* always add fail-safe mode as backup mode */
mutex_lock(&connector->dev->mode_config.mutex);
drm_add_modes_noedid(connector, 640, 480);
mutex_unlock(&connector->dev->mode_config.mutex);
}

if (panel->aux_cfg_update_done) {
@@ -265,23 +265,16 @@ static int dp_parser_clock(struct dp_parser *parser)
return 0;
}

static int dp_parser_find_panel(struct dp_parser *parser)
static int dp_parser_find_next_bridge(struct dp_parser *parser)
{
struct device *dev = &parser->pdev->dev;
struct drm_panel *panel;
int rc;
struct drm_bridge *bridge;

rc = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
if (rc) {
DRM_ERROR("failed to acquire DRM panel: %d\n", rc);
return rc;
}
bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
if (IS_ERR(bridge))
return PTR_ERR(bridge);

parser->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
if (IS_ERR(parser->panel_bridge)) {
DRM_ERROR("failed to create panel bridge\n");
return PTR_ERR(parser->panel_bridge);
}
parser->next_bridge = bridge;

return 0;
}

@@ -307,10 +300,23 @@ static int dp_parser_parse(struct dp_parser *parser, int connector_type)
if (rc)
return rc;

if (connector_type == DRM_MODE_CONNECTOR_eDP) {
rc = dp_parser_find_panel(parser);
if (rc)
/*
* External bridges are mandatory for eDP interfaces: one has to
* provide at least an eDP panel (which gets wrapped into panel-bridge).
*
* For DisplayPort interfaces external bridges are optional, so
* silently ignore an error if one is not present (-ENODEV).
*/
rc = dp_parser_find_next_bridge(parser);
if (rc == -ENODEV) {
if (connector_type == DRM_MODE_CONNECTOR_eDP) {
DRM_ERROR("eDP: next bridge is not present\n");
return rc;
}
} else if (rc) {
if (rc != -EPROBE_DEFER)
DRM_ERROR("DP: error parsing next bridge: %d\n", rc);
return rc;
}

/* Map the corresponding regulator information according to
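A minimal sketch of the optional-bridge handling this change enables (not part of this patch; the example_* wrapper is hypothetical, while devm_drm_of_get_bridge() and the -ENODEV convention are as used in the hunks above): a missing downstream bridge is tolerated for DP but treated as an error for eDP.

/* Minimal sketch, assuming the dp_parser layout shown above. */
static int example_find_bridge(struct dp_parser *parser, bool is_edp)
{
	struct device *dev = &parser->pdev->dev;
	struct drm_bridge *bridge;

	bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
	if (IS_ERR(bridge)) {
		int rc = PTR_ERR(bridge);

		/* DP: a missing external bridge is not an error. */
		if (rc == -ENODEV && !is_edp)
			return 0;

		return rc;	/* eDP: mandatory, or a genuine failure */
	}

	parser->next_bridge = bridge;
	return 0;
}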
@@ -123,7 +123,7 @@ struct dp_parser {
struct dp_display_data disp_data;
const struct dp_regulator_cfg *regulator_cfg;
u32 max_dp_lanes;
struct drm_bridge *panel_bridge;
struct drm_bridge *next_bridge;

int (*parse)(struct dp_parser *parser, int connector_type);
};
@@ -4,6 +4,7 @@
*/

#include "dsi.h"
#include "dsi_cfg.h"

struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
{

@@ -175,7 +176,8 @@ static int dsi_dev_remove(struct platform_device *pdev)
}

static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,mdss-dsi-ctrl" },
{ .compatible = "qcom,mdss-dsi-ctrl", .data = NULL /* autodetect cfg */ },
{ .compatible = "qcom,dsi-ctrl-6g-qcm2290", .data = &qcm2290_dsi_cfg_handler },
{}
};
@@ -213,6 +213,24 @@ static const struct msm_dsi_config sc7280_dsi_cfg = {
.num_dsi = 1,
};

static const char * const dsi_qcm2290_bus_clk_names[] = {
"iface", "bus",
};

static const struct msm_dsi_config qcm2290_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
.num = 1,
.regs = {
{"vdda", 21800, 4 }, /* 1.2 V */
},
},
.bus_clk_names = dsi_qcm2290_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_qcm2290_bus_clk_names),
.io_start = { 0x5e94000 },
.num_dsi = 1,
};

static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
.link_clk_set_rate = dsi_link_clk_set_rate_v2,
.link_clk_enable = dsi_link_clk_enable_v2,

@@ -300,3 +318,8 @@ const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
return cfg_hnd;
}

/* Non autodetect configs */
const struct msm_dsi_cfg_handler qcm2290_dsi_cfg_handler = {
.cfg = &qcm2290_dsi_cfg,
.ops = &msm_dsi_6g_v2_host_ops,
};
@@ -60,5 +60,8 @@ struct msm_dsi_cfg_handler {

const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor);

/* Non autodetect configs */
extern const struct msm_dsi_cfg_handler qcm2290_dsi_cfg_handler;

#endif /* __MSM_DSI_CFG_H__ */
@@ -212,6 +212,10 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
int ret;
u32 major = 0, minor = 0;

cfg_hnd = device_get_match_data(dev);
if (cfg_hnd)
return cfg_hnd;

ahb_clk = msm_clk_get(msm_host->pdev, "iface");
if (IS_ERR(ahb_clk)) {
pr_err("%s: cannot get interface clock\n", __func__);

@@ -1813,7 +1817,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
goto fail;
}

msm_host->ctrl_base = msm_ioremap_size(pdev, "dsi_ctrl", "DSI CTRL", &msm_host->ctrl_size);
msm_host->ctrl_base = msm_ioremap_size(pdev, "dsi_ctrl", &msm_host->ctrl_size);
if (IS_ERR(msm_host->ctrl_base)) {
pr_err("%s: unable to map Dsi ctrl base\n", __func__);
ret = PTR_ERR(msm_host->ctrl_base);

@@ -1877,7 +1881,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)

/* do not autoenable, will be enabled later */
ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN,
IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN,
"dsi_isr", msm_host);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
@@ -305,27 +305,6 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
return num;
}

static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int id = dsi_mgr_connector_get_id(connector);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_encoder *encoder = msm_dsi_get_encoder(msm_dsi);
struct msm_drm_private *priv = connector->dev->dev_private;
struct msm_kms *kms = priv->kms;
long actual, requested;

DBG("");
requested = 1000 * mode->clock;
actual = kms->funcs->round_pixclk(kms, requested, encoder);

DBG("requested=%ld, actual=%ld", requested, actual);
if (actual != requested)
return MODE_CLOCK_RANGE;

return MODE_OK;
}

static struct drm_encoder *
dsi_mgr_connector_best_encoder(struct drm_connector *connector)
{

@@ -336,13 +315,12 @@ dsi_mgr_connector_best_encoder(struct drm_connector *connector)
return msm_dsi_get_encoder(msm_dsi);
}

static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
static void dsi_mgr_bridge_power_on(struct drm_bridge *bridge)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
struct msm_dsi_phy_shared_timings phy_shared_timings[DSI_MAX];
bool is_bonded_dsi = IS_BONDED_DSI();
int ret;

@@ -383,6 +361,34 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
if (is_bonded_dsi && msm_dsi1)
msm_dsi_host_enable_irq(msm_dsi1->host);

return;

host1_on_fail:
msm_dsi_host_power_off(host);
host_on_fail:
dsi_mgr_phy_disable(id);
phy_en_fail:
return;
}

static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
bool is_bonded_dsi = IS_BONDED_DSI();
int ret;

DBG("id=%d", id);
if (!msm_dsi_device_connected(msm_dsi))
return;

/* Do nothing with the host if it is slave-DSI in case of bonded DSI */
if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
return;

/* Always call panel functions once, because even for dual panels,
* there is only one drm_panel instance.
*/

@@ -417,17 +423,7 @@ host_en_fail:
if (panel)
drm_panel_unprepare(panel);
panel_prep_fail:
msm_dsi_host_disable_irq(host);
if (is_bonded_dsi && msm_dsi1)
msm_dsi_host_disable_irq(msm_dsi1->host);

if (is_bonded_dsi && msm_dsi1)
msm_dsi_host_power_off(msm_dsi1->host);
host1_on_fail:
msm_dsi_host_power_off(host);
host_on_fail:
dsi_mgr_phy_disable(id);
phy_en_fail:
return;
}

@@ -573,6 +569,8 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
msm_dsi_host_set_display_mode(host, adjusted_mode);
if (is_bonded_dsi && other_dsi)
msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);

dsi_mgr_bridge_power_on(bridge);
}

static const struct drm_connector_funcs dsi_mgr_connector_funcs = {

@@ -586,7 +584,6 @@ static const struct drm_connector_funcs dsi_mgr_connector_funcs = {

static const struct drm_connector_helper_funcs dsi_mgr_conn_helper_funcs = {
.get_modes = dsi_mgr_connector_get_modes,
.mode_valid = dsi_mgr_connector_mode_valid,
.best_encoder = dsi_mgr_connector_best_encoder,
};
@@ -709,14 +709,14 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
if (!of_property_read_u32(dev->of_node, "phy-type", &phy_type))
phy->cphy_mode = (phy_type == PHY_TYPE_CPHY);

phy->base = msm_ioremap_size(pdev, "dsi_phy", "DSI_PHY", &phy->base_size);
phy->base = msm_ioremap_size(pdev, "dsi_phy", &phy->base_size);
if (IS_ERR(phy->base)) {
DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
ret = -ENOMEM;
goto fail;
}

phy->pll_base = msm_ioremap_size(pdev, "dsi_pll", "DSI_PLL", &phy->pll_size);
phy->pll_base = msm_ioremap_size(pdev, "dsi_pll", &phy->pll_size);
if (IS_ERR(phy->pll_base)) {
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
ret = -ENOMEM;

@@ -724,7 +724,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
}

if (phy->cfg->has_phy_lane) {
phy->lane_base = msm_ioremap_size(pdev, "dsi_phy_lane", "DSI_PHY_LANE", &phy->lane_size);
phy->lane_base = msm_ioremap_size(pdev, "dsi_phy_lane", &phy->lane_size);
if (IS_ERR(phy->lane_base)) {
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n", __func__);
ret = -ENOMEM;

@@ -733,7 +733,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
}

if (phy->cfg->has_phy_regulator) {
phy->reg_base = msm_ioremap_size(pdev, "dsi_phy_regulator", "DSI_PHY_REG", &phy->reg_size);
phy->reg_base = msm_ioremap_size(pdev, "dsi_phy_regulator", &phy->reg_size);
if (IS_ERR(phy->reg_base)) {
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n", __func__);
ret = -ENOMEM;

@@ -741,6 +741,12 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
}
}

if (phy->cfg->ops.parse_dt_properties) {
ret = phy->cfg->ops.parse_dt_properties(phy);
if (ret)
goto fail;
}

ret = dsi_phy_regulator_init(phy);
if (ret)
goto fail;
@@ -25,6 +25,7 @@ struct msm_dsi_phy_ops {
void (*save_pll_state)(struct msm_dsi_phy *phy);
int (*restore_pll_state)(struct msm_dsi_phy *phy);
bool (*set_continuous_clock)(struct msm_dsi_phy *phy, bool enable);
int (*parse_dt_properties)(struct msm_dsi_phy *phy);
};

struct msm_dsi_phy_cfg {

@@ -82,6 +83,8 @@ struct msm_dsi_dphy_timing {
#define DSI_PIXEL_PLL_CLK 1
#define NUM_PROVIDED_CLKS 2

#define DSI_LANE_MAX 5

struct msm_dsi_phy {
struct platform_device *pdev;
void __iomem *base;

@@ -99,6 +102,7 @@ struct msm_dsi_phy {

struct msm_dsi_dphy_timing timing;
const struct msm_dsi_phy_cfg *cfg;
void *tuning_cfg;

enum msm_dsi_phy_usecase usecase;
bool regulator_ldo_mode;
@@ -83,6 +83,18 @@ struct dsi_pll_10nm {

#define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, clk_hw)

/**
* struct dsi_phy_10nm_tuning_cfg - Holds 10nm PHY tuning config parameters.
* @rescode_offset_top: Offset for pull-up legs rescode.
* @rescode_offset_bot: Offset for pull-down legs rescode.
* @vreg_ctrl: vreg ctrl to drive LDO level
*/
struct dsi_phy_10nm_tuning_cfg {
u8 rescode_offset_top[DSI_LANE_MAX];
u8 rescode_offset_bot[DSI_LANE_MAX];
u8 vreg_ctrl;
};

/*
* Global list of private DSI PLL struct pointers. We need this for bonded DSI
* mode, where the master PLL's clk_ops needs access the slave's private data

@@ -562,7 +574,9 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **prov
char clk_name[32], parent[32], vco_name[32];
char parent2[32], parent3[32], parent4[32];
struct clk_init_data vco_init = {
.parent_names = (const char *[]){ "xo" },
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
.name = vco_name,
.flags = CLK_IGNORE_UNUSED,

@@ -747,6 +761,7 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
int i;
u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
void __iomem *lane_base = phy->lane_base;
struct dsi_phy_10nm_tuning_cfg *tuning_cfg = phy->tuning_cfg;

if (phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)
tx_dctrl[3] = 0x02;

@@ -775,10 +790,13 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
i == 4 ? 0x80 : 0x0);
dsi_phy_write(lane_base +
REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i), 0x0);
dsi_phy_write(lane_base +
REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i), 0x0);

/* platform specific dsi phy drive strength adjustment */
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i),
tuning_cfg->rescode_offset_top[i]);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i),
tuning_cfg->rescode_offset_bot[i]);

dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
tx_dctrl[i]);
}

@@ -799,6 +817,7 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy,
u32 const timeout_us = 1000;
struct msm_dsi_dphy_timing *timing = &phy->timing;
void __iomem *base = phy->base;
struct dsi_phy_10nm_tuning_cfg *tuning_cfg = phy->tuning_cfg;
u32 data;

DBG("");

@@ -834,8 +853,9 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy,
/* Select MS1 byte-clk */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);

/* Enable LDO */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL, 0x59);
/* Enable LDO with platform specific drive level/amplitude adjustment */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL,
tuning_cfg->vreg_ctrl);

/* Configure PHY lane swap (TODO: we need to calculate this) */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
@ -922,6 +942,92 @@ static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
|
||||
DBG("DSI%d PHY disabled", phy->id);
|
||||
}
|
||||
|
||||
static int dsi_10nm_phy_parse_dt(struct msm_dsi_phy *phy)
|
||||
{
|
||||
struct device *dev = &phy->pdev->dev;
|
||||
struct dsi_phy_10nm_tuning_cfg *tuning_cfg;
|
||||
s8 offset_top[DSI_LANE_MAX] = { 0 }; /* No offset */
|
||||
s8 offset_bot[DSI_LANE_MAX] = { 0 }; /* No offset */
|
||||
u32 ldo_level = 400; /* 400mV */
|
||||
u8 level;
|
||||
int ret, i;
|
||||
|
||||
tuning_cfg = devm_kzalloc(dev, sizeof(*tuning_cfg), GFP_KERNEL);
|
||||
if (!tuning_cfg)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Drive strength adjustment parameters */
|
||||
ret = of_property_read_u8_array(dev->of_node, "qcom,phy-rescode-offset-top",
|
||||
offset_top, DSI_LANE_MAX);
|
||||
if (ret && ret != -EINVAL) {
|
||||
DRM_DEV_ERROR(dev, "failed to parse qcom,phy-rescode-offset-top, %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
for (i = 0; i < DSI_LANE_MAX; i++) {
|
||||
if (offset_top[i] < -32 || offset_top[i] > 31) {
|
||||
DRM_DEV_ERROR(dev,
|
||||
"qcom,phy-rescode-offset-top value %d is not in range [-32..31]\n",
|
||||
offset_top[i]);
|
||||
return -EINVAL;
|
||||
}
|
||||
tuning_cfg->rescode_offset_top[i] = 0x3f & offset_top[i];
|
||||
}
|
||||
|
||||
ret = of_property_read_u8_array(dev->of_node, "qcom,phy-rescode-offset-bot",
|
||||
offset_bot, DSI_LANE_MAX);
|
||||
if (ret && ret != -EINVAL) {
|
||||
DRM_DEV_ERROR(dev, "failed to parse qcom,phy-rescode-offset-bot, %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
for (i = 0; i < DSI_LANE_MAX; i++) {
|
||||
if (offset_bot[i] < -32 || offset_bot[i] > 31) {
|
||||
DRM_DEV_ERROR(dev,
|
||||
"qcom,phy-rescode-offset-bot value %d is not in range [-32..31]\n",
|
||||
offset_bot[i]);
|
||||
return -EINVAL;
|
||||
}
|
||||
tuning_cfg->rescode_offset_bot[i] = 0x3f & offset_bot[i];
|
||||
}
|
||||
|
||||
/* Drive level/amplitude adjustment parameters */
|
||||
ret = of_property_read_u32(dev->of_node, "qcom,phy-drive-ldo-level", &ldo_level);
|
||||
if (ret && ret != -EINVAL) {
|
||||
DRM_DEV_ERROR(dev, "failed to parse qcom,phy-drive-ldo-level, %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
switch (ldo_level) {
|
||||
case 375:
|
||||
level = 0;
|
||||
break;
|
||||
case 400:
|
||||
level = 1;
|
||||
break;
|
||||
case 425:
|
||||
level = 2;
|
||||
break;
|
||||
case 450:
|
||||
level = 3;
|
||||
break;
|
||||
case 475:
|
||||
level = 4;
|
||||
break;
|
||||
case 500:
|
||||
level = 5;
|
||||
break;
|
||||
default:
|
||||
DRM_DEV_ERROR(dev, "qcom,phy-drive-ldo-level %d is not supported\n", ldo_level);
|
||||
return -EINVAL;
|
||||
}
|
||||
tuning_cfg->vreg_ctrl = 0x58 | (0x7 & level);
|
||||
|
||||
phy->tuning_cfg = tuning_cfg;
|
||||
|
||||
return 0;
|
||||
}
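As a quick illustration of the two encodings used above (a sketch only, not additional driver code): the 400 mV default maps to level 1, which reproduces the 0x59 value that dsi_10nm_phy_enable() used to hard-code into VREG_CTRL, and the signed per-lane offsets are truncated to the 6-bit rescode field:

#include <linux/types.h>

/* Illustration of the packing done by dsi_10nm_phy_parse_dt() above. */
static inline u8 example_ldo_vreg_ctrl(u8 level)
{
	/* 400 mV -> level 1 -> 0x58 | 1 = 0x59 (the previously hard-coded value) */
	return 0x58 | (0x7 & level);
}

static inline u8 example_rescode_field(s8 offset)
{
	/* signed -32..31 truncated to 6 bits: +1 -> 0x01, -1 -> 0x3f */
	return 0x3f & offset;
}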

const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
.has_phy_lane = true,
.reg_cfg = {
@ -936,6 +1042,7 @@ const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
.pll_init = dsi_pll_10nm_init,
.save_pll_state = dsi_10nm_pll_save_state,
.restore_pll_state = dsi_10nm_pll_restore_state,
.parse_dt_properties = dsi_10nm_phy_parse_dt,
},
.min_pll_rate = 1000000000UL,
.max_pll_rate = 3500000000UL,
@ -957,6 +1064,7 @@ const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
.pll_init = dsi_pll_10nm_init,
.save_pll_state = dsi_10nm_pll_save_state,
.restore_pll_state = dsi_10nm_pll_restore_state,
.parse_dt_properties = dsi_10nm_phy_parse_dt,
},
.min_pll_rate = 1000000000UL,
.max_pll_rate = 3500000000UL,

@ -802,7 +802,9 @@ static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm, struct clk_hw **prov
{
char clk_name[32], parent[32], vco_name[32];
struct clk_init_data vco_init = {
.parent_names = (const char *[]){ "xo" },
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
.name = vco_name,
.flags = CLK_IGNORE_UNUSED,

@ -521,7 +521,9 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
{
char clk_name[32], parent1[32], parent2[32], vco_name[32];
struct clk_init_data vco_init = {
.parent_names = (const char *[]){ "xo" },
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref", .name = "xo",
},
.num_parents = 1,
.name = vco_name,
.flags = CLK_IGNORE_UNUSED,
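One note on the .parent_data conversions in these PLL hunks (a sketch of the common clock framework lookup, not part of the diff): the VCO parent is now resolved through the consumer's own clocks/clock-names DT entry ("ref"), and the 28nm variant keeps a global clock name as a fallback:

#include <linux/clk-provider.h>

/* Illustration only: how the new parent description is looked up. */
static const struct clk_parent_data example_vco_parent = {
	.fw_name = "ref",	/* matched against the node's clock-names entry */
	.name = "xo",		/* optional fallback by global clock name */
};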

@ -385,7 +385,9 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
{
char *clk_name, *parent_name, *vco_name;
struct clk_init_data vco_init = {
.parent_names = (const char *[]){ "pxo" },
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_28nm_vco,

@ -588,7 +588,9 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
char clk_name[32], parent[32], vco_name[32];
char parent2[32], parent3[32], parent4[32];
struct clk_init_data vco_init = {
.parent_names = (const char *[]){ "bi_tcxo" },
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
.name = vco_name,
.flags = CLK_IGNORE_UNUSED,
@ -862,20 +864,26 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
/* Alter PHY configurations if data rate less than 1.5GHZ*/
less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000);

/* For C-PHY, no low power settings for lower clk rate */
if (phy->cphy_mode)
less_than_1500_mhz = false;

if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52;
glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00;
glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 : 0x3c;
if (phy->cphy_mode) {
glbl_rescode_top_ctrl = 0x00;
glbl_rescode_bot_ctrl = 0x3c;
} else {
glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00;
glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 : 0x3c;
}
glbl_str_swi_cal_sel_ctrl = 0x00;
glbl_hstx_str_ctrl_0 = 0x88;
} else {
vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59;
glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
if (phy->cphy_mode) {
glbl_str_swi_cal_sel_ctrl = 0x03;
glbl_hstx_str_ctrl_0 = 0x66;
} else {
glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
}
glbl_rescode_top_ctrl = 0x03;
glbl_rescode_bot_ctrl = 0x3c;
}

@ -133,7 +133,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->config = config;
spin_lock_init(&hdmi->reg_lock);

hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI");
hdmi->mmio = msm_ioremap(pdev, config->mmio_name);
if (IS_ERR(hdmi->mmio)) {
ret = PTR_ERR(hdmi->mmio);
goto fail;
@ -144,8 +144,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
config->mmio_name);
hdmi->mmio_phy_addr = res->start;

hdmi->qfprom_mmio = msm_ioremap(pdev,
config->qfprom_mmio_name, "HDMI_QFPROM");
hdmi->qfprom_mmio = msm_ioremap(pdev, config->qfprom_mmio_name);
if (IS_ERR(hdmi->qfprom_mmio)) {
DRM_DEV_INFO(&pdev->dev, "can't find qfprom resource\n");
hdmi->qfprom_mmio = NULL;
@ -306,7 +305,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
}

ret = devm_request_irq(&pdev->dev, hdmi->irq,
msm_hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
msm_hdmi_irq, IRQF_TRIGGER_HIGH,
"hdmi_isr", hdmi);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",

@ -282,15 +282,18 @@ static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge
long actual, requested;

requested = 1000 * mode->clock;
actual = kms->funcs->round_pixclk(kms,
requested, hdmi_bridge->hdmi->encoder);

/* for mdp5/apq8074, we manage our own pixel clk (as opposed to
* mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
* instead):
*/
if (config->pwr_clk_cnt > 0)
actual = clk_round_rate(hdmi->pwr_clks[0], actual);
if (kms->funcs->round_pixclk)
actual = kms->funcs->round_pixclk(kms,
requested, hdmi_bridge->hdmi->encoder);
else if (config->pwr_clk_cnt > 0)
actual = clk_round_rate(hdmi->pwr_clks[0], requested);
else
actual = requested;

DBG("requested=%ld, actual=%ld", requested, actual);

@ -144,7 +144,7 @@ static int msm_hdmi_phy_probe(struct platform_device *pdev)
if (!phy->cfg)
return -ENODEV;

phy->mmio = msm_ioremap(pdev, "hdmi_phy", "HDMI_PHY");
phy->mmio = msm_ioremap(pdev, "hdmi_phy");
if (IS_ERR(phy->mmio)) {
DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
return -ENOMEM;

@ -716,19 +716,18 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)

pll->pdev = pdev;

pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll");
if (IS_ERR(pll->mmio_qserdes_com)) {
DRM_DEV_ERROR(dev, "failed to map pll base\n");
return -ENOMEM;
}

for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
char name[32], label[32];
char name[32];

snprintf(name, sizeof(name), "hdmi_tx_l%d", i);
snprintf(label, sizeof(label), "HDMI_TX_L%d", i);

pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name, label);
pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name);
if (IS_ERR(pll->mmio_qserdes_tx[i])) {
DRM_DEV_ERROR(dev, "failed to map pll base\n");
return -ENOMEM;

@ -434,7 +434,7 @@ int msm_hdmi_pll_8960_init(struct platform_device *pdev)
if (!pll)
return -ENOMEM;

pll->mmio = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
pll->mmio = msm_ioremap(pdev, "hdmi_pll");
if (IS_ERR(pll->mmio)) {
DRM_DEV_ERROR(dev, "failed to map pll base\n");
return -ENOMEM;

@ -201,9 +201,9 @@ shrink_set(void *data, u64 val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(shrink_fops,
shrink_get, shrink_set,
"0x%08llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(shrink_fops,
shrink_get, shrink_set,
"0x%08llx\n");

static int msm_gem_show(struct seq_file *m, void *arg)
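A side note on the attribute conversion above: DEFINE_DEBUGFS_ATTRIBUTE() generates wrappers that already take debugfs_file_get()/debugfs_file_put(), so such attributes are meant to be registered with debugfs_create_file_unsafe(). A hypothetical registration sketch (only shrink_fops is taken from the diff; the function and its arguments are assumed):

#include <linux/debugfs.h>

static void example_add_shrink_node(struct dentry *root, void *data)
{
	/* the _unsafe variant skips the extra file-lifetime proxy */
	debugfs_create_file_unsafe("shrink", 0600, root, data, &shrink_fops);
}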

@ -57,14 +57,6 @@ static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
.atomic_commit_tail = msm_atomic_commit_tail,
};

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
@ -83,152 +75,6 @@ static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);

/*
* Util/helpers:
*/

struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
const char *name)
{
int i;
char n[32];

snprintf(n, sizeof(n), "%s_clk", name);

for (i = 0; bulk && i < count; i++) {
if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
return bulk[i].clk;
}

return NULL;
}

struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
struct clk *clk;
char name2[32];

clk = devm_clk_get(&pdev->dev, name);
if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
return clk;

snprintf(name2, sizeof(name2), "%s_clk", name);

clk = devm_clk_get(&pdev->dev, name2);
if (!IS_ERR(clk))
dev_warn(&pdev->dev, "Using legacy clk name binding. Use "
"\"%s\" instead of \"%s\"\n", name, name2);

return clk;
}

static void __iomem *_msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname, bool quiet, phys_addr_t *psize)
{
struct resource *res;
unsigned long size;
void __iomem *ptr;

if (name)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
else
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

if (!res) {
if (!quiet)
DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
return ERR_PTR(-EINVAL);
}

size = resource_size(res);

ptr = devm_ioremap(&pdev->dev, res->start, size);
if (!ptr) {
if (!quiet)
DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
return ERR_PTR(-ENOMEM);
}

if (reglog)
printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

if (psize)
*psize = size;

return ptr;
}

void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname)
{
return _msm_ioremap(pdev, name, dbgname, false, NULL);
}

void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name,
const char *dbgname)
{
return _msm_ioremap(pdev, name, dbgname, true, NULL);
}

void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
const char *dbgname, phys_addr_t *psize)
{
return _msm_ioremap(pdev, name, dbgname, false, psize);
}

void msm_writel(u32 data, void __iomem *addr)
{
if (reglog)
printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
u32 val = readl(addr);
if (reglog)
pr_err("IO:R %p %08x\n", addr, val);
return val;
}

void msm_rmw(void __iomem *addr, u32 mask, u32 or)
{
u32 val = msm_readl(addr);

val &= ~mask;
msm_writel(val | or, addr);
}

static enum hrtimer_restart msm_hrtimer_worktimer(struct hrtimer *t)
{
struct msm_hrtimer_work *work = container_of(t,
struct msm_hrtimer_work, timer);

kthread_queue_work(work->worker, &work->work);

return HRTIMER_NORESTART;
}

void msm_hrtimer_queue_work(struct msm_hrtimer_work *work,
ktime_t wakeup_time,
enum hrtimer_mode mode)
{
hrtimer_start(&work->timer, wakeup_time, mode);
}

void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
struct kthread_worker *worker,
kthread_work_func_t fn,
clockid_t clock_id,
enum hrtimer_mode mode)
{
hrtimer_init(&work->timer, clock_id, mode);
work->timer.function = msm_hrtimer_worktimer;
work->worker = worker;
kthread_init_work(&work->work, fn);
}

static irqreturn_t msm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
@ -763,7 +609,8 @@ static int msm_ioctl_get_param(struct drm_device *dev, void *data,
if (!gpu)
return -ENXIO;

return gpu->funcs->get_param(gpu, args->param, &args->value);
return gpu->funcs->get_param(gpu, file->driver_priv,
args->param, &args->value);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,

@ -462,15 +462,21 @@ struct clk *msm_clk_get(struct platform_device *pdev, const char *name);

struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
const char *name);
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname);
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name);
void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
const char *dbgname, phys_addr_t *size);
void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name,
const char *dbgname);
void msm_writel(u32 data, void __iomem *addr);
u32 msm_readl(const void __iomem *addr);
void msm_rmw(void __iomem *addr, u32 mask, u32 or);
phys_addr_t *size);
void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name);

#define msm_writel(data, addr) writel((data), (addr))
#define msm_readl(addr) readl((addr))

static inline void msm_rmw(void __iomem *addr, u32 mask, u32 or)
{
u32 val = msm_readl(addr);

val &= ~mask;
msm_writel(val | or, addr);
}

/**
* struct msm_hrtimer_work - a helper to combine an hrtimer with kthread_work

@ -35,6 +35,9 @@ struct msm_gem_address_space {
* will be non-NULL:
*/
struct pid *pid;

/* @faults: the number of GPU hangs associated with this address space */
int faults;
};

struct msm_gem_vma {

@ -370,8 +370,8 @@ static void recover_worker(struct kthread_work *work)
struct task_struct *task;

/* Increment the fault counts */
gpu->global_faults++;
submit->queue->faults++;
submit->aspace->faults++;

task = get_pid_task(submit->pid, PIDTYPE_PID);
if (task) {
@ -389,6 +389,12 @@ static void recover_worker(struct kthread_work *work)
} else {
msm_rd_dump_submit(priv->hangrd, submit, NULL);
}
} else {
/*
* We couldn't attribute this fault to any particular context,
* so increment the global fault count instead.
*/
gpu->global_faults++;
}

/* Record the crash state */
@ -861,7 +867,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,

/* Map registers: */
gpu->mmio = msm_ioremap(pdev, config->ioname, name);
gpu->mmio = msm_ioremap(pdev, config->ioname);
if (IS_ERR(gpu->mmio)) {
ret = PTR_ERR(gpu->mmio);
goto fail;

@ -42,7 +42,8 @@ struct msm_gpu_config {
* + z180_gpu
*/
struct msm_gpu_funcs {
int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int (*get_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t *value);
int (*hw_init)(struct msm_gpu *gpu);
int (*pm_suspend)(struct msm_gpu *gpu);
int (*pm_resume)(struct msm_gpu *gpu);
@ -199,7 +200,10 @@ struct msm_gpu {
/* does gpu need hw_init? */
bool needs_hw_init;

/* number of GPU hangs (for all contexts) */
/**
* global_faults: number of GPU hangs not attributed to a particular
* address space
*/
int global_faults;

void __iomem *mmio;

drivers/gpu/drm/msm/msm_io_utils.c (new file, 126 lines)
@ -0,0 +1,126 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*/

#include "msm_drv.h"

/*
* Util/helpers:
*/

struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
const char *name)
{
int i;
char n[32];

snprintf(n, sizeof(n), "%s_clk", name);

for (i = 0; bulk && i < count; i++) {
if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
return bulk[i].clk;
}

return NULL;
}

struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
struct clk *clk;
char name2[32];

clk = devm_clk_get(&pdev->dev, name);
if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
return clk;

snprintf(name2, sizeof(name2), "%s_clk", name);

clk = devm_clk_get(&pdev->dev, name2);
if (!IS_ERR(clk))
dev_warn(&pdev->dev, "Using legacy clk name binding. Use "
"\"%s\" instead of \"%s\"\n", name, name2);

return clk;
}
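A minimal caller sketch for msm_clk_get() above (the clock name is assumed, not taken from the diff): the plain name from clock-names is matched first, and the legacy "_clk"-suffixed name is only tried as a warned-about fallback:

static int example_get_iface_clk(struct platform_device *pdev,
				 struct clk **out)
{
	struct clk *clk = msm_clk_get(pdev, "iface");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*out = clk;
	return 0;
}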

static void __iomem *_msm_ioremap(struct platform_device *pdev, const char *name,
bool quiet, phys_addr_t *psize)
{
struct resource *res;
unsigned long size;
void __iomem *ptr;

if (name)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
else
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

if (!res) {
if (!quiet)
DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
return ERR_PTR(-EINVAL);
}

size = resource_size(res);

ptr = devm_ioremap(&pdev->dev, res->start, size);
if (!ptr) {
if (!quiet)
DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
return ERR_PTR(-ENOMEM);
}

if (psize)
*psize = size;

return ptr;
}

void __iomem *msm_ioremap(struct platform_device *pdev, const char *name)
{
return _msm_ioremap(pdev, name, false, NULL);
}

void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name)
{
return _msm_ioremap(pdev, name, true, NULL);
}

void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
phys_addr_t *psize)
{
return _msm_ioremap(pdev, name, false, psize);
}
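A minimal caller sketch for the trimmed-down helpers above (the "ctrl" resource name is assumed): with the dbgname argument gone, only the reg resource name is passed, and msm_ioremap_size() additionally reports the size of the mapping:

static int example_map_ctrl(struct platform_device *pdev)
{
	phys_addr_t size;
	void __iomem *base = msm_ioremap_size(pdev, "ctrl", &size);

	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}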

static enum hrtimer_restart msm_hrtimer_worktimer(struct hrtimer *t)
{
struct msm_hrtimer_work *work = container_of(t,
struct msm_hrtimer_work, timer);

kthread_queue_work(work->worker, &work->work);

return HRTIMER_NORESTART;
}

void msm_hrtimer_queue_work(struct msm_hrtimer_work *work,
ktime_t wakeup_time,
enum hrtimer_mode mode)
{
hrtimer_start(&work->timer, wakeup_time, mode);
}

void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
struct kthread_worker *worker,
kthread_work_func_t fn,
clockid_t clock_id,
enum hrtimer_mode mode)
{
hrtimer_init(&work->timer, clock_id, mode);
work->timer.function = msm_hrtimer_worktimer;
work->worker = worker;
kthread_init_work(&work->work, fn);
}
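A hypothetical user of the msm_hrtimer_work helpers above, showing the intended init-once / arm-later pattern (the work function runs on the kthread_worker, not in hrtimer context):

static void example_work_fn(struct kthread_work *work)
{
	/* deferred work, running in worker (process) context */
}

static void example_schedule(struct msm_hrtimer_work *w,
			     struct kthread_worker *worker)
{
	msm_hrtimer_work_init(w, worker, example_work_fn,
			      CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	msm_hrtimer_queue_work(w, ktime_add_ms(ktime_get(), 5),
			       HRTIMER_MODE_ABS);
}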

@ -62,6 +62,7 @@ enum rd_sect_type {
RD_FRAG_SHADER,
RD_BUFFER_CONTENTS,
RD_GPU_ID,
RD_CHIP_ID,
};

#define BUF_SZ 512 /* should be power of 2 */
@ -196,12 +197,17 @@ static int rd_open(struct inode *inode, struct file *file)

/* the parsing tools need to know gpu-id to know which
* register database to load.
*
* Note: These particular params do not require a context
*/
gpu->funcs->get_param(gpu, MSM_PARAM_GPU_ID, &val);
gpu->funcs->get_param(gpu, NULL, MSM_PARAM_GPU_ID, &val);
gpu_id = val;

rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id));

gpu->funcs->get_param(gpu, NULL, MSM_PARAM_CHIP_ID, &val);
rd_write_section(rd, RD_CHIP_ID, &val, sizeof(val));

out:
mutex_unlock(&gpu->lock);
return ret;