drm: msm: Use DRM_DEV_* instead of dev_*
Use DRM_DEV_INFO/ERROR/WARN instead of dev_info/err/debug to generate drm-formatted, device-specific log messages, so that output is easy to tell apart when multiple instances of the driver are running.

Signed-off-by: Mamta Shukla <mamtashukla555@gmail.com>
Signed-off-by: Rob Clark <robdclark@gmail.com>
parent 84511abc47
commit 6a41da17e8
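The conversion is mechanical: each dev_err/dev_info/dev_dbg call on a DRM device's struct device is replaced by the corresponding DRM_DEV_ERROR/DRM_DEV_INFO/DRM_DEV_DEBUG macro from <drm/drm_print.h>, keeping the same (device, format, arguments) shape while routing the message through the DRM logging helpers so it carries a per-device prefix. A minimal sketch of the pattern the patch applies throughout (the helper function and message below are illustrative, not taken from the patch):

#include <linux/device.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>

/* Hypothetical helper used only to illustrate the substitution. */
static void example_log_probe_failure(struct drm_device *drm, int ret)
{
	/* Before: generic driver-core logging, prefixed by the device name only. */
	dev_err(drm->dev, "probe failed: %d\n", ret);

	/* After: same arguments, routed through the DRM logging helpers so the
	 * message gets the drm-formatted, per-device prefix.
	 */
	DRM_DEV_ERROR(drm->dev, "probe failed: %d\n", ret);
}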
@@ -481,7 +481,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 int ret;

 if (!pdev) {
-dev_err(dev->dev, "no a3xx device\n");
+DRM_DEV_ERROR(dev->dev, "no a3xx device\n");
 ret = -ENXIO;
 goto fail;
 }
@@ -528,7 +528,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 * to not be possible to restrict access, then we must
 * implement a cmdstream validator.
 */
-dev_err(dev->dev, "No memory protection without IOMMU\n");
+DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
 ret = -ENXIO;
 goto fail;
 }
@@ -561,7 +561,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 int ret;

 if (!pdev) {
-dev_err(dev->dev, "no a4xx device\n");
+DRM_DEV_ERROR(dev->dev, "no a4xx device\n");
 ret = -ENXIO;
 goto fail;
 }
@@ -608,7 +608,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 * to not be possible to restrict access, then we must
 * implement a cmdstream validator.
 */
-dev_err(dev->dev, "No memory protection without IOMMU\n");
+DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
 ret = -ENXIO;
 goto fail;
 }
@@ -173,7 +173,7 @@ int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
 minor->debugfs_root, minor);

 if (ret) {
-dev_err(dev->dev, "could not install a5xx_debugfs_list\n");
+DRM_DEV_ERROR(dev->dev, "could not install a5xx_debugfs_list\n");
 return ret;
 }

@@ -514,7 +514,7 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
 if (IS_ERR(a5xx_gpu->pm4_bo)) {
 ret = PTR_ERR(a5xx_gpu->pm4_bo);
 a5xx_gpu->pm4_bo = NULL;
-dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
+DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PM4: %d\n",
 ret);
 return ret;
 }
@@ -527,7 +527,7 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
 if (IS_ERR(a5xx_gpu->pfp_bo)) {
 ret = PTR_ERR(a5xx_gpu->pfp_bo);
 a5xx_gpu->pfp_bo = NULL;
-dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
+DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PFP: %d\n",
 ret);
 return ret;
 }
@@ -1028,7 +1028,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
 struct msm_drm_private *priv = dev->dev_private;
 struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);

-dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
 ring ? ring->id : -1, ring ? ring->seqno : 0,
 gpu_read(gpu, REG_A5XX_RBBM_STATUS),
 gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
@@ -1134,7 +1134,7 @@ static const u32 a5xx_registers[] = {

 static void a5xx_dump(struct msm_gpu *gpu)
 {
-dev_info(gpu->dev->dev, "status: %08x\n",
+DRM_DEV_INFO(gpu->dev->dev, "status: %08x\n",
 gpu_read(gpu, REG_A5XX_RBBM_STATUS));
 adreno_dump(gpu);
 }
@@ -1505,7 +1505,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
 int ret;

 if (!pdev) {
-dev_err(dev->dev, "No A5XX device is defined\n");
+DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
 return ERR_PTR(-ENXIO);
 }

@@ -92,7 +92,7 @@ static void a5xx_preempt_timer(struct timer_list *t)
 if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
 return;

-dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
+DRM_DEV_ERROR(dev->dev, "%s: preemption timed out\n", gpu->name);
 queue_work(priv->wq, &gpu->recover_work);
 }

@@ -188,7 +188,7 @@ void a5xx_preempt_irq(struct msm_gpu *gpu)
 status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
 if (unlikely(status)) {
 set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
-dev_err(dev->dev, "%s: Preemption failed to complete\n",
+DRM_DEV_ERROR(dev->dev, "%s: Preemption failed to complete\n",
 gpu->name);
 queue_work(priv->wq, &gpu->recover_work);
 return;
@@ -153,7 +153,7 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
 val == 0xbabeface, 100, 10000);

 if (ret)
-dev_err(gmu->dev, "GMU firmware initialization timed out\n");
+DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");

 return ret;
 }
@@ -168,7 +168,7 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
 val & 1, 100, 10000);
 if (ret)
-dev_err(gmu->dev, "Unable to start the HFI queues\n");
+DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");

 return ret;
 }
@@ -209,7 +209,7 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 val & (1 << ack), 100, 10000);

 if (ret)
-dev_err(gmu->dev,
+DRM_DEV_ERROR(gmu->dev,
 "Timeout waiting for GMU OOB set %s: 0x%x\n",
 name,
 gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
@@ -251,7 +251,7 @@ static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
 (val & 0x38) == 0x28, 1, 100);

 if (ret) {
-dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
+DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
 }

@@ -273,7 +273,7 @@ static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
 (val & 0x04), 100, 10000);

 if (ret)
-dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
+DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
 }

@@ -317,7 +317,7 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
 /* Check to see if the GMU really did slumber */
 if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
 != 0x0f) {
-dev_err(gmu->dev, "The GMU did not go into slumber\n");
+DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
 ret = -ETIMEDOUT;
 }
 }
@@ -339,7 +339,7 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
 val & (1 << 1), 100, 10000);
 if (ret) {
-dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
+DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
 return ret;
 }

@@ -354,7 +354,7 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
 return 0;
 }

-dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
+DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
 return ret;
 }

@@ -368,7 +368,7 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
 ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
 val, val & (1 << 16), 100, 10000);
 if (ret)
-dev_err(gmu->dev, "Unable to power off the GPU RSC\n");
+DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");

 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
 }
@@ -520,7 +520,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)

 /* Sanity check the size of the firmware that was loaded */
 if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
-dev_err(gmu->dev,
+DRM_DEV_ERROR(gmu->dev,
 "GMU firmware is bigger than the available region\n");
 return -EINVAL;
 }
@@ -764,7 +764,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
 */

 if (ret)
-dev_err(gmu->dev,
+DRM_DEV_ERROR(gmu->dev,
 "Unable to slumber GMU: status = 0%x/0%x\n",
 gmu_read(gmu,
 REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
@@ -843,7 +843,7 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
 IOMMU_READ | IOMMU_WRITE);

 if (ret) {
-dev_err(gmu->dev, "Unable to map GMU buffer object\n");
+DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");

 for (i = i - 1 ; i >= 0; i--)
 iommu_unmap(gmu->domain,
@@ -969,12 +969,12 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
 }

 if (j == pri_count) {
-dev_err(dev,
+DRM_DEV_ERROR(dev,
 "Level %u not found in in the RPMh list\n",
 level);
-dev_err(dev, "Available levels:\n");
+DRM_DEV_ERROR(dev, "Available levels:\n");
 for (j = 0; j < pri_count; j++)
-dev_err(dev, " %u\n", pri[j]);
+DRM_DEV_ERROR(dev, " %u\n", pri[j]);

 return -EINVAL;
 }
@@ -1081,7 +1081,7 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
 */
 ret = dev_pm_opp_of_add_table(gmu->dev);
 if (ret) {
-dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n");
+DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
 return ret;
 }

@@ -1122,13 +1122,13 @@ static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
 IORESOURCE_MEM, name);

 if (!res) {
-dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
+DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
 return ERR_PTR(-EINVAL);
 }

 ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
 if (!ret) {
-dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
+DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
 return ERR_PTR(-EINVAL);
 }

@@ -1145,7 +1145,7 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
 ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
 name, gmu);
 if (ret) {
-dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
+DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s\n", name);
 return ret;
 }
@@ -481,7 +481,7 @@ out:

 static void a6xx_dump(struct msm_gpu *gpu)
 {
-dev_info(&gpu->pdev->dev, "status: %08x\n",
+DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
 gpu_read(gpu, REG_A6XX_RBBM_STATUS));
 adreno_dump(gpu);
 }
@@ -498,7 +498,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
 adreno_dump_info(gpu);

 for (i = 0; i < 8; i++)
-dev_info(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
+DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));

 if (hang_debug)
@@ -91,7 +91,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
 val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);

 if (ret) {
-dev_err(gmu->dev,
+DRM_DEV_ERROR(gmu->dev,
 "Message %s id %d timed out waiting for response\n",
 a6xx_hfi_msg_id[id], seqnum);
 return -ETIMEDOUT;
@@ -110,7 +110,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,

 /* If the queue is empty our response never made it */
 if (!ret) {
-dev_err(gmu->dev,
+DRM_DEV_ERROR(gmu->dev,
 "The HFI response queue is unexpectedly empty\n");

 return -ENOENT;
@@ -120,20 +120,20 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
 struct a6xx_hfi_msg_error *error =
 (struct a6xx_hfi_msg_error *) &resp;

-dev_err(gmu->dev, "GMU firmware error %d\n",
+DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
 error->code);
 continue;
 }

 if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
-dev_err(gmu->dev,
+DRM_DEV_ERROR(gmu->dev,
 "Unexpected message id %d on the response queue\n",
 HFI_HEADER_SEQNUM(resp.ret_header));
 continue;
 }

 if (resp.error) {
-dev_err(gmu->dev,
+DRM_DEV_ERROR(gmu->dev,
 "Message %s id %d returned error %d\n",
 a6xx_hfi_msg_id[id], seqnum, resp.error);
 return -EINVAL;
@@ -163,7 +163,7 @@ static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,

 ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
 if (ret) {
-dev_err(gmu->dev, "Unable to send message %s id %d\n",
+DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
 a6xx_hfi_msg_id[id], seqnum);
 return ret;
 }
@@ -317,7 +317,7 @@ void a6xx_hfi_stop(struct a6xx_gmu *gmu)
 continue;

 if (queue->header->read_index != queue->header->write_index)
-dev_err(gmu->dev, "HFI queue %d is not empty\n", i);
+DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);

 queue->header->read_index = 0;
 queue->header->write_index = 0;
@@ -196,7 +196,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)

 ret = pm_runtime_get_sync(&pdev->dev);
 if (ret < 0) {
-dev_err(dev->dev, "Couldn't power up the GPU: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
 return NULL;
 }

@@ -205,7 +205,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
 mutex_unlock(&dev->struct_mutex);
 pm_runtime_put_autosuspend(&pdev->dev);
 if (ret) {
-dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
 return NULL;
 }

@@ -253,7 +253,7 @@ static int find_chipid(struct device *dev, struct adreno_rev *rev)
 /* and if that fails, fall back to legacy "qcom,chipid" property: */
 ret = of_property_read_u32(node, "qcom,chipid", &chipid);
 if (ret) {
-dev_err(dev, "could not parse qcom,chipid: %d\n", ret);
+DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
 return ret;
 }

@@ -89,12 +89,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)

 ret = request_firmware_direct(&fw, newname, drm->dev);
 if (!ret) {
-dev_info(drm->dev, "loaded %s from new location\n",
+DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
 newname);
 adreno_gpu->fwloc = FW_LOCATION_NEW;
 goto out;
 } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
-dev_err(drm->dev, "failed to load %s: %d\n",
+DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
 newname, ret);
 fw = ERR_PTR(ret);
 goto out;
@@ -109,12 +109,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)

 ret = request_firmware_direct(&fw, fwname, drm->dev);
 if (!ret) {
-dev_info(drm->dev, "loaded %s from legacy location\n",
+DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
 newname);
 adreno_gpu->fwloc = FW_LOCATION_LEGACY;
 goto out;
 } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
-dev_err(drm->dev, "failed to load %s: %d\n",
+DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
 fwname, ret);
 fw = ERR_PTR(ret);
 goto out;
@@ -130,19 +130,19 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)

 ret = request_firmware(&fw, newname, drm->dev);
 if (!ret) {
-dev_info(drm->dev, "loaded %s with helper\n",
+DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
 newname);
 adreno_gpu->fwloc = FW_LOCATION_HELPER;
 goto out;
 } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
-dev_err(drm->dev, "failed to load %s: %d\n",
+DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
 newname, ret);
 fw = ERR_PTR(ret);
 goto out;
 }
 }

-dev_err(drm->dev, "failed to load %s\n", fwname);
+DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
 fw = ERR_PTR(-ENOENT);
 out:
 kfree(newname);
@@ -212,7 +212,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
 ret = msm_gem_get_iova(ring->bo, gpu->aspace, &ring->iova);
 if (ret) {
 ring->iova = 0;
-dev_err(gpu->dev->dev,
+DRM_DEV_ERROR(gpu->dev->dev,
 "could not map ringbuffer %d: %d\n", i, ret);
 return ret;
 }
@@ -277,7 +277,7 @@ void adreno_recover(struct msm_gpu *gpu)

 ret = msm_gpu_hw_init(gpu);
 if (ret) {
-dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
 /* hmm, oh well? */
 }
 }
@@ -635,7 +635,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)

 node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
 if (!node) {
-dev_err(dev, "Could not find the GPU powerlevels\n");
+DRM_DEV_ERROR(dev, "Could not find the GPU powerlevels\n");
 return -ENXIO;
 }

@@ -674,7 +674,7 @@ static int adreno_get_pwrlevels(struct device *dev,
 else {
 ret = dev_pm_opp_of_add_table(dev);
 if (ret)
-dev_err(dev, "Unable to set the OPP table\n");
+DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
 }

 if (!ret) {
@@ -144,7 +144,7 @@ static struct dpu_dbg_base {
 static void _dpu_debug_bus_xbar_dump(void __iomem *mem_base,
 struct dpu_debug_bus_entry *entry, u32 val)
 {
-dev_err(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
+DRM_DEV_ERROR(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
 entry->wr_addr, entry->block_id, entry->test_id, val);
 }

@@ -154,7 +154,7 @@ static void _dpu_debug_bus_lm_dump(void __iomem *mem_base,
 if (!(val & 0xFFF000))
 return;

-dev_err(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
+DRM_DEV_ERROR(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
 entry->wr_addr, entry->block_id, entry->test_id, val);
 }

@@ -164,7 +164,7 @@ static void _dpu_debug_bus_ppb0_dump(void __iomem *mem_base,
 if (!(val & BIT(15)))
 return;

-dev_err(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
+DRM_DEV_ERROR(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
 entry->wr_addr, entry->block_id, entry->test_id, val);
 }

@@ -174,7 +174,7 @@ static void _dpu_debug_bus_ppb1_dump(void __iomem *mem_base,
 if (!(val & BIT(15)))
 return;

-dev_err(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
+DRM_DEV_ERROR(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
 entry->wr_addr, entry->block_id, entry->test_id, val);
 }

@@ -1994,7 +1994,7 @@ static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
 if (!in_log && !in_mem)
 return;

-dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+DRM_DEV_INFO(dpu_dbg_base.dev, "======== start %s dump =========\n",
 bus->cmn.name);

 if (in_mem) {
@@ -2004,7 +2004,7 @@ static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)

 if (*dump_mem) {
 dump_addr = *dump_mem;
-dev_info(dpu_dbg_base.dev,
+DRM_DEV_INFO(dpu_dbg_base.dev,
 "%s: start_addr:0x%pK len:0x%x\n",
 __func__, dump_addr, list_size);
 } else {
@@ -2032,7 +2032,7 @@ static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
 status = readl_relaxed(mem_base + offset);

 if (in_log)
-dev_info(dpu_dbg_base.dev,
+DRM_DEV_INFO(dpu_dbg_base.dev,
 "waddr=0x%x blk=%d tst=%d val=0x%x\n",
 head->wr_addr, head->block_id,
 head->test_id, status);
@@ -2055,7 +2055,7 @@ static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
 }
 _dpu_dbg_enable_power(false);

-dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+DRM_DEV_INFO(dpu_dbg_base.dev, "======== end %s dump =========\n",
 bus->cmn.name);
 }

@@ -2086,7 +2086,7 @@ static void _dpu_dbg_dump_vbif_debug_bus_entry(
 *dump_addr++ = val;
 }
 if (in_log)
-dev_info(dpu_dbg_base.dev,
+DRM_DEV_INFO(dpu_dbg_base.dev,
 "testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
 head->block_bus_addr, i, j, val);
 }
@@ -2127,7 +2127,7 @@ static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
 list_size = bus->cmn.entries_size;
 dump_mem = &bus->cmn.dumped_content;

-dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+DRM_DEV_INFO(dpu_dbg_base.dev, "======== start %s dump =========\n",
 bus->cmn.name);

 if (!dump_mem || !dbg_bus || !bus_size || !list_size)
@@ -2155,7 +2155,7 @@ static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)

 if (*dump_mem) {
 dump_addr = *dump_mem;
-dev_info(dpu_dbg_base.dev,
+DRM_DEV_INFO(dpu_dbg_base.dev,
 "%s: start_addr:0x%pK len:0x%x\n",
 __func__, dump_addr, list_size);
 } else {
@@ -2180,7 +2180,7 @@ static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
 reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
 reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
 reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
-dev_err(dpu_dbg_base.dev,
+DRM_DEV_ERROR(dpu_dbg_base.dev,
 "XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
 reg, reg1, reg2);
 reg >>= 16;
@@ -2194,7 +2194,7 @@ static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
 d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
 d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);

-dev_err(dpu_dbg_base.dev,
+DRM_DEV_ERROR(dpu_dbg_base.dev,
 "Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
 i, d0, d1);
 }
@@ -2217,7 +2217,7 @@ static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)

 _dpu_dbg_enable_power(false);

-dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+DRM_DEV_INFO(dpu_dbg_base.dev, "======== end %s dump =========\n",
 bus->cmn.name);
 }

@@ -16,6 +16,8 @@
 #include <linux/err.h>
 #include <linux/delay.h>

+#include <drm/drm_print.h>
+
 #include "dpu_io_util.h"

 void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
@@ -164,7 +166,7 @@ int msm_dss_parse_clock(struct platform_device *pdev,
 "clock-names", i,
 &clock_name);
 if (rc) {
-dev_err(&pdev->dev, "Failed to get clock name for %d\n",
+DRM_DEV_ERROR(&pdev->dev, "Failed to get clock name for %d\n",
 i);
 break;
 }
@@ -176,13 +178,13 @@ int msm_dss_parse_clock(struct platform_device *pdev,

 rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
 if (rc) {
-dev_err(&pdev->dev, "Failed to get clock refs %d\n", rc);
+DRM_DEV_ERROR(&pdev->dev, "Failed to get clock refs %d\n", rc);
 goto err;
 }

 rc = of_clk_set_defaults(pdev->dev.of_node, false);
 if (rc) {
-dev_err(&pdev->dev, "Failed to set clock defaults %d\n", rc);
+DRM_DEV_ERROR(&pdev->dev, "Failed to set clock defaults %d\n", rc);
 goto err;
 }

@@ -429,7 +429,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
 int ret;

 if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
-dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
 return -EINVAL;
 }

@@ -45,7 +45,7 @@ static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
 struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");

 if (!dtv_pdata) {
-dev_err(dev->dev, "could not find dtv pdata\n");
+DRM_DEV_ERROR(dev->dev, "could not find dtv pdata\n");
 return;
 }

@@ -209,16 +209,16 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)

 ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
 if (ret)
-dev_err(dev->dev, "failed to set mdp_clk to %lu: %d\n",
+DRM_DEV_ERROR(dev->dev, "failed to set mdp_clk to %lu: %d\n",
 pc, ret);

 ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
 if (ret)
-dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "failed to enabled mdp_clk: %d\n", ret);

 ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
 if (ret)
-dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "failed to enable hdmi_clk: %d\n", ret);

 mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);

@@ -258,14 +258,14 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)

 mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
 if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
-dev_err(dev->dev, "failed to get hdmi_clk\n");
+DRM_DEV_ERROR(dev->dev, "failed to get hdmi_clk\n");
 ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
 goto fail;
 }

 mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
 if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
-dev_err(dev->dev, "failed to get tv_clk\n");
+DRM_DEV_ERROR(dev->dev, "failed to get tv_clk\n");
 ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
 goto fail;
 }
@@ -43,7 +43,7 @@ static int mdp4_hw_init(struct msm_kms *kms)
 DBG("found MDP4 version v%d.%d", major, minor);

 if (major != 4) {
-dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
 major, minor);
 ret = -ENXIO;
 goto out;
@@ -251,7 +251,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,

 encoder = mdp4_lcdc_encoder_init(dev, panel_node);
 if (IS_ERR(encoder)) {
-dev_err(dev->dev, "failed to construct LCDC encoder\n");
+DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
 return PTR_ERR(encoder);
 }

@@ -260,7 +260,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,

 connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
 if (IS_ERR(connector)) {
-dev_err(dev->dev, "failed to initialize LVDS connector\n");
+DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
 return PTR_ERR(connector);
 }

@@ -271,7 +271,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
 case DRM_MODE_ENCODER_TMDS:
 encoder = mdp4_dtv_encoder_init(dev);
 if (IS_ERR(encoder)) {
-dev_err(dev->dev, "failed to construct DTV encoder\n");
+DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n");
 return PTR_ERR(encoder);
 }

@@ -282,7 +282,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
 /* Construct bridge/connector for HDMI: */
 ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
 if (ret) {
-dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
 return ret;
 }
 }
@@ -300,7 +300,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
 encoder = mdp4_dsi_encoder_init(dev);
 if (IS_ERR(encoder)) {
 ret = PTR_ERR(encoder);
-dev_err(dev->dev,
+DRM_DEV_ERROR(dev->dev,
 "failed to construct DSI encoder: %d\n", ret);
 return ret;
 }
@@ -311,14 +311,14 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,

 ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
 if (ret) {
-dev_err(dev->dev, "failed to initialize DSI: %d\n",
+DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
 ret);
 return ret;
 }

 break;
 default:
-dev_err(dev->dev, "Invalid or unsupported interface\n");
+DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n");
 return -EINVAL;
 }

@@ -354,7 +354,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
 for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
 plane = mdp4_plane_init(dev, vg_planes[i], false);
 if (IS_ERR(plane)) {
-dev_err(dev->dev,
+DRM_DEV_ERROR(dev->dev,
 "failed to construct plane for VG%d\n", i + 1);
 ret = PTR_ERR(plane);
 goto fail;
@@ -365,7 +365,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
 for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
 plane = mdp4_plane_init(dev, rgb_planes[i], true);
 if (IS_ERR(plane)) {
-dev_err(dev->dev,
+DRM_DEV_ERROR(dev->dev,
 "failed to construct plane for RGB%d\n", i + 1);
 ret = PTR_ERR(plane);
 goto fail;
@@ -374,7 +374,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
 crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
 mdp4_crtcs[i]);
 if (IS_ERR(crtc)) {
-dev_err(dev->dev, "failed to construct crtc for %s\n",
+DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
 mdp4_crtc_names[i]);
 ret = PTR_ERR(crtc);
 goto fail;
@@ -396,7 +396,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
 for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
 ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
 if (ret) {
-dev_err(dev->dev, "failed to initialize intf: %d, %d\n",
+DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n",
 i, ret);
 goto fail;
 }
@@ -419,7 +419,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)

 mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
 if (!mdp4_kms) {
-dev_err(dev->dev, "failed to allocate kms\n");
+DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
 ret = -ENOMEM;
 goto fail;
 }
@@ -439,7 +439,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 irq = platform_get_irq(pdev, 0);
 if (irq < 0) {
 ret = irq;
-dev_err(dev->dev, "failed to get irq: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
 goto fail;
 }

@@ -456,14 +456,14 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 if (mdp4_kms->vdd) {
 ret = regulator_enable(mdp4_kms->vdd);
 if (ret) {
-dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret);
 goto fail;
 }
 }

 mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
 if (IS_ERR(mdp4_kms->clk)) {
-dev_err(dev->dev, "failed to get core_clk\n");
+DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n");
 ret = PTR_ERR(mdp4_kms->clk);
 goto fail;
 }
@@ -475,14 +475,14 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 // XXX if (rev >= MDP_REV_42) { ???
 mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
 if (IS_ERR(mdp4_kms->lut_clk)) {
-dev_err(dev->dev, "failed to get lut_clk\n");
+DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
 ret = PTR_ERR(mdp4_kms->lut_clk);
 goto fail;
 }

 mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
 if (IS_ERR(mdp4_kms->axi_clk)) {
-dev_err(dev->dev, "failed to get axi_clk\n");
+DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
 ret = PTR_ERR(mdp4_kms->axi_clk);
 goto fail;
 }
@@ -519,21 +519,21 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 if (ret)
 goto fail;
 } else {
-dev_info(dev->dev, "no iommu, fallback to phys "
+DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
 "contig buffers for scanout\n");
 aspace = NULL;
 }

 ret = modeset_init(mdp4_kms);
 if (ret) {
-dev_err(dev->dev, "modeset_init failed: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
 goto fail;
 }

 mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
 if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
 ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
-dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
 mdp4_kms->blank_cursor_bo = NULL;
 goto fail;
 }
@@ -541,7 +541,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
 &mdp4_kms->blank_cursor_iova);
 if (ret) {
-dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
 goto fail;
 }

@@ -47,7 +47,7 @@ static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
 struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0");

 if (!lcdc_pdata) {
-dev_err(dev->dev, "could not find lvds pdata\n");
+DRM_DEV_ERROR(dev->dev, "could not find lvds pdata\n");
 return;
 }

@@ -224,7 +224,7 @@ static void setup_phy(struct drm_encoder *encoder)
 break;

 default:
-dev_err(dev->dev, "unknown bpp: %d\n", bpp);
+DRM_DEV_ERROR(dev->dev, "unknown bpp: %d\n", bpp);
 return;
 }

@@ -241,7 +241,7 @@ static void setup_phy(struct drm_encoder *encoder)
 MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN;
 break;
 default:
-dev_err(dev->dev, "unknown # of channels: %d\n", nchan);
+DRM_DEV_ERROR(dev->dev, "unknown # of channels: %d\n", nchan);
 return;
 }

@@ -361,7 +361,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
 for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
 ret = regulator_disable(mdp4_lcdc_encoder->regs[i]);
 if (ret)
-dev_err(dev->dev, "failed to disable regulator: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
 }

 bs_set(mdp4_lcdc_encoder, 0);
@@ -398,16 +398,16 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
 for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
 ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
 if (ret)
-dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "failed to enable regulator: %d\n", ret);
 }

 DBG("setting lcdc_clk=%lu", pc);
 ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc);
 if (ret)
-dev_err(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
 ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk);
 if (ret)
-dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "failed to enable lcdc_clk: %d\n", ret);

 panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
 if (!IS_ERR(panel)) {
@@ -461,7 +461,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
 /* TODO: do we need different pll in other cases? */
 mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
 if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
-dev_err(dev->dev, "failed to get lvds_clk\n");
+DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
 ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk);
 goto fail;
 }
@@ -470,7 +470,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
 reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v");
 if (IS_ERR(reg)) {
 ret = PTR_ERR(reg);
-dev_err(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
 goto fail;
 }
 mdp4_lcdc_encoder->regs[0] = reg;
@@ -478,7 +478,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
 reg = devm_regulator_get(dev->dev, "lvds-pll-vdda");
 if (IS_ERR(reg)) {
 ret = PTR_ERR(reg);
-dev_err(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
 goto fail;
 }
 mdp4_lcdc_encoder->regs[1] = reg;
@@ -486,7 +486,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
 reg = devm_regulator_get(dev->dev, "lvds-vdda");
 if (IS_ERR(reg)) {
 ret = PTR_ERR(reg);
-dev_err(dev->dev, "failed to get lvds-vdda: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "failed to get lvds-vdda: %d\n", ret);
 goto fail;
 }
 mdp4_lcdc_encoder->regs[2] = reg;
@@ -234,22 +234,22 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
 format = to_mdp_format(msm_framebuffer_format(fb));

 if (src_w > (crtc_w * DOWN_SCALE_MAX)) {
-dev_err(dev->dev, "Width down scaling exceeds limits!\n");
+DRM_DEV_ERROR(dev->dev, "Width down scaling exceeds limits!\n");
 return -ERANGE;
 }

 if (src_h > (crtc_h * DOWN_SCALE_MAX)) {
-dev_err(dev->dev, "Height down scaling exceeds limits!\n");
+DRM_DEV_ERROR(dev->dev, "Height down scaling exceeds limits!\n");
 return -ERANGE;
 }

 if (crtc_w > (src_w * UP_SCALE_MAX)) {
-dev_err(dev->dev, "Width up scaling exceeds limits!\n");
+DRM_DEV_ERROR(dev->dev, "Width up scaling exceeds limits!\n");
 return -ERANGE;
 }

 if (crtc_h > (src_h * UP_SCALE_MAX)) {
-dev_err(dev->dev, "Height up scaling exceeds limits!\n");
+DRM_DEV_ERROR(dev->dev, "Height up scaling exceeds limits!\n");
 return -ERANGE;
 }

@@ -600,7 +600,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
 }

 if (major != 1) {
-dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
+DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n",
 major, minor);
 ret = -ENXIO;
 goto fail;
@@ -615,7 +615,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
 break;
 }
 if (unlikely(!mdp5_cfg)) {
-dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+DRM_DEV_ERROR(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
 major, minor);
 ret = -ENXIO;
 goto fail;
@@ -55,20 +55,20 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
 int pp_id = mixer->pp;

 if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
-dev_err(dev, "vsync_clk is not initialized\n");
+DRM_DEV_ERROR(dev, "vsync_clk is not initialized\n");
 return -EINVAL;
 }

 total_lines_x100 = mode->vtotal * mode->vrefresh;
 if (!total_lines_x100) {
-dev_err(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
+DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
 __func__, mode->vtotal, mode->vrefresh);
 return -EINVAL;
 }

 vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE);
 if (vsync_clk_speed <= 0) {
-dev_err(dev, "vsync_clk round rate failed %ld\n",
+DRM_DEV_ERROR(dev, "vsync_clk round rate failed %ld\n",
 vsync_clk_speed);
 return -EINVAL;
 }
@@ -102,13 +102,13 @@ static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
 ret = clk_set_rate(mdp5_kms->vsync_clk,
 clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE));
 if (ret) {
-dev_err(encoder->dev->dev,
+DRM_DEV_ERROR(encoder->dev->dev,
 "vsync_clk clk_set_rate failed, %d\n", ret);
 return ret;
 }
 ret = clk_prepare_enable(mdp5_kms->vsync_clk);
 if (ret) {
-dev_err(encoder->dev->dev,
+DRM_DEV_ERROR(encoder->dev->dev,
 "vsync_clk clk_prepare_enable failed, %d\n", ret);
 return ret;
 }
@@ -662,7 +662,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,

 ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
 if (ret) {
-dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
 return ret;
 }

@@ -679,7 +679,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 * and that we don't have conflicting mixer stages:
 */
 if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
-dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
+DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
 cnt, start);
 return -EINVAL;
 }
@@ -879,7 +879,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 }

 if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
-dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
 return -EINVAL;
 }

@@ -924,7 +924,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 set_cursor:
 ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
 if (ret) {
-dev_err(dev->dev, "failed to %sable cursor: %d\n",
+DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
 cursor_enable ? "en" : "dis", ret);
 goto end;
 }
@@ -262,13 +262,13 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
 struct mdp5_hw_mixer *mixer = pipeline->mixer;

 if (unlikely(WARN_ON(!mixer))) {
-dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
+DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
 ctl->id);
 return -EINVAL;
 }

 if (pipeline->r_mixer) {
-dev_err(ctl_mgr->dev->dev, "unsupported configuration");
+DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration");
 return -EINVAL;
 }

@@ -604,10 +604,10 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
 mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
 return 0;
 } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
-dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
+DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n");
 return -EINVAL;
 } else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
-dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
+DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
 return -EINVAL;
 }

@@ -652,7 +652,7 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
 if ((ctl_mgr->ctls[c].status & checkm) == match)
 goto found;

-dev_err(ctl_mgr->dev->dev, "No more CTL available!");
+DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
 goto unlock;

 found:
@@ -698,13 +698,13 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,

 ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
 if (!ctl_mgr) {
-dev_err(dev->dev, "failed to allocate CTL manager\n");
+DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
 ret = -ENOMEM;
 goto fail;
 }

 if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
-dev_err(dev->dev, "Increase static pool size to at least %d\n",
+DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
 ctl_cfg->count);
 ret = -ENOSPC;
 goto fail;
@@ -723,7 +723,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

 if (WARN_ON(!ctl_cfg->base[c])) {
-dev_err(dev->dev, "CTL_%d: base is null!\n", c);
+DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
 ret = -EINVAL;
 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 goto fail;
@@ -264,7 +264,7 @@ static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
 minor->debugfs_root, minor);

 if (ret) {
-dev_err(dev->dev, "could not install mdp5_debugfs_list\n");
+DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n");
 return ret;
 }

@@ -337,7 +337,7 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,

 encoder = mdp5_encoder_init(dev, intf, ctl);
 if (IS_ERR(encoder)) {
-dev_err(dev->dev, "failed to construct encoder\n");
+DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");
 return encoder;
 }

@@ -418,7 +418,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
 int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);

 if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
-dev_err(dev->dev, "failed to find dsi from intf %d\n",
+DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
 intf->num);
 ret = -EINVAL;
 break;
@@ -443,7 +443,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
 break;
 }
 default:
-dev_err(dev->dev, "unknown intf: %d\n", intf->type);
+DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
 ret = -EINVAL;
 break;
 }
@@ -500,7 +500,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 plane = mdp5_plane_init(dev, type);
 if (IS_ERR(plane)) {
 ret = PTR_ERR(plane);
-dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
+DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
 goto fail;
 }
 priv->planes[priv->num_planes++] = plane;
@@ -517,7 +517,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i);
 if (IS_ERR(crtc)) {
 ret = PTR_ERR(crtc);
-dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
+DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
 goto fail;
 }
 priv->crtcs[priv->num_crtcs++] = crtc;
@@ -552,7 +552,7 @@ static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
 *major = FIELD(version, MDP5_HW_VERSION_MAJOR);
 *minor = FIELD(version, MDP5_HW_VERSION_MINOR);

-dev_info(dev, "MDP5 version v%d.%d", *major, *minor);
+DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
 }

 static int get_clk(struct platform_device *pdev, struct clk **clkp,
@@ -561,7 +561,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
 struct device *dev = &pdev->dev;
 struct clk *clk = msm_clk_get(pdev, name);
 if (IS_ERR(clk) && mandatory) {
-dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
+DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
 return PTR_ERR(clk);
 }
 if (IS_ERR(clk))
@@ -688,7 +688,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
 if (irq < 0) {
 ret = irq;
-dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
+DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
 goto fail;
 }

@@ -724,12 +724,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
 ARRAY_SIZE(iommu_ports));
 if (ret) {
-dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
 ret);
 goto fail;
 }
 } else {
-dev_info(&pdev->dev,
+DRM_DEV_INFO(&pdev->dev,
 "no iommu, fallback to phys contig buffers for scanout\n");
 aspace = NULL;
 }
@@ -738,7 +738,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)

 ret = modeset_init(mdp5_kms);
 if (ret) {
-dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
+DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
 goto fail;
 }

@@ -795,7 +795,7 @@ static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
 hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
 if (IS_ERR(hwpipe)) {
 ret = PTR_ERR(hwpipe);
-dev_err(dev->dev, "failed to construct pipe for %s (%d)\n",
+DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
 pipe2name(pipes[i]), ret);
 return ret;
 }
@@ -867,7 +867,7 @@ static int hwmixer_init(struct mdp5_kms *mdp5_kms)
 mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
 if (IS_ERR(mixer)) {
 ret = PTR_ERR(mixer);
-dev_err(dev->dev, "failed to construct LM%d (%d)\n",
+DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
 i, ret);
 return ret;
 }
@@ -897,7 +897,7 @@ static int interface_init(struct mdp5_kms *mdp5_kms)

 intf = kzalloc(sizeof(*intf), GFP_KERNEL);
 if (!intf) {
-dev_err(dev->dev, "failed to construct INTF%d\n", i);
+DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
 return -ENOMEM;
 }

@@ -132,7 +132,7 @@ static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
 d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
 mdp5_mdss);
 if (!d) {
-dev_err(dev, "mdss irq domain add failed\n");
+DRM_DEV_ERROR(dev, "mdss irq domain add failed\n");
 return -ENXIO;
 }

@@ -246,7 +246,7 @@ int mdp5_mdss_init(struct drm_device *dev)

 ret = msm_mdss_get_clocks(mdp5_mdss);
 if (ret) {
-dev_err(dev->dev, "failed to get clocks: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "failed to get clocks: %d\n", ret);
 goto fail;
 }

@@ -259,7 +259,7 @@ int mdp5_mdss_init(struct drm_device *dev)

 ret = regulator_enable(mdp5_mdss->vdd);
 if (ret) {
-dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
+DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n",
 ret);
 goto fail;
 }
@@ -267,13 +267,13 @@ int mdp5_mdss_init(struct drm_device *dev)
 ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
 mdss_irq, 0, "mdss_isr", mdp5_mdss);
 if (ret) {
-dev_err(dev->dev, "failed to init irq: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret);
 goto fail_irq;
 }

 ret = mdss_irq_domain_init(mdp5_mdss);
 if (ret) {
-dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "failed to init sub-block irqs: %d\n", ret);
 goto fail_irq;
 }

@@ -125,7 +125,7 @@ static int mdp5_plane_atomic_set_property(struct drm_plane *plane,

 SET_PROPERTY(zpos, ZPOS, uint8_t);

-dev_err(dev->dev, "Invalid property\n");
+DRM_DEV_ERROR(dev->dev, "Invalid property\n");
 ret = -EINVAL;
 done:
 return ret;
@@ -153,7 +153,7 @@ static int mdp5_plane_atomic_get_property(struct drm_plane *plane,

 GET_PROPERTY(zpos, ZPOS, uint8_t);

-dev_err(dev->dev, "Invalid property\n");
+DRM_DEV_ERROR(dev->dev, "Invalid property\n");
 ret = -EINVAL;
 done:
 return ret;
@@ -658,7 +658,7 @@ static int calc_scalex_steps(struct drm_plane *plane,

 ret = calc_phase_step(src, dest, &phasex_step);
 if (ret) {
-dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
+DRM_DEV_ERROR(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
 return ret;
 }

@@ -683,7 +683,7 @@ static int calc_scaley_steps(struct drm_plane *plane,

 ret = calc_phase_step(src, dest, &phasey_step);
 if (ret) {
-dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
+DRM_DEV_ERROR(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
 return ret;
 }

@@ -88,7 +88,7 @@ static int smp_request_block(struct mdp5_smp *smp,

 avail = cnt - bitmap_weight(state->state, cnt);
 if (nblks > avail) {
-dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
+DRM_DEV_ERROR(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
 nblks, avail);
 return -ENOSPC;
 }
@@ -188,7 +188,7 @@ int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
 DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
 ret = smp_request_block(smp, state, cid, n);
 if (ret) {
-dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
+DRM_DEV_ERROR(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
 n, ret);
 return ret;
 }
@@ -29,7 +29,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)

 phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
 if (!phy_node) {
-dev_err(&pdev->dev, "cannot find phy device\n");
+DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
 return -ENXIO;
 }

@@ -40,7 +40,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
 of_node_put(phy_node);

 if (!phy_pdev || !msm_dsi->phy) {
-dev_err(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
 return -EPROBE_DEFER;
 }

@@ -210,7 +210,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,

 ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
 if (ret) {
-dev_err(dev->dev, "failed to modeset init host: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "failed to modeset init host: %d\n", ret);
 goto fail;
 }

@@ -222,7 +222,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
 msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
 if (IS_ERR(msm_dsi->bridge)) {
 ret = PTR_ERR(msm_dsi->bridge);
-dev_err(dev->dev, "failed to create dsi bridge: %d\n", ret);
+DRM_DEV_ERROR(dev->dev, "failed to create dsi bridge: %d\n", ret);
 msm_dsi->bridge = NULL;
 goto fail;
 }
@@ -244,7 +244,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,

 if (IS_ERR(msm_dsi->connector)) {
 ret = PTR_ERR(msm_dsi->connector);
-dev_err(dev->dev,
+DRM_DEV_ERROR(dev->dev,
 "failed to create dsi connector: %d\n", ret);
 msm_dsi->connector = NULL;
 goto fail;
@ -1050,7 +1050,7 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
msecs_to_jiffies(70));

if (ret <= 0)
dev_err(dev, "wait for video done timed out\n");
DRM_DEV_ERROR(dev, "wait for video done timed out\n");

dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
}

@ -1673,7 +1673,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
prop = of_find_property(ep, "data-lanes", &len);
if (!prop) {
dev_dbg(dev,
DRM_DEV_DEBUG(dev,
"failed to find data lane mapping, using default\n");
return 0;
}

@ -1681,7 +1681,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
num_lanes = len / sizeof(u32);

if (num_lanes < 1 || num_lanes > 4) {
dev_err(dev, "bad number of data lanes\n");
DRM_DEV_ERROR(dev, "bad number of data lanes\n");
return -EINVAL;
}

@ -1690,7 +1690,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
num_lanes);
if (ret) {
dev_err(dev, "failed to read lane data\n");
DRM_DEV_ERROR(dev, "failed to read lane data\n");
return ret;
}

@ -1711,7 +1711,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
*/
for (j = 0; j < num_lanes; j++) {
if (lane_map[j] < 0 || lane_map[j] > 3)
dev_err(dev, "bad physical lane entry %u\n",
DRM_DEV_ERROR(dev, "bad physical lane entry %u\n",
lane_map[j]);

if (swap[lane_map[j]] != j)
@ -1742,13 +1742,13 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
*/
endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
if (!endpoint) {
dev_dbg(dev, "%s: no endpoint\n", __func__);
DRM_DEV_DEBUG(dev, "%s: no endpoint\n", __func__);
return 0;
}

ret = dsi_host_parse_lane_data(msm_host, endpoint);
if (ret) {
dev_err(dev, "%s: invalid lane configuration %d\n",
DRM_DEV_ERROR(dev, "%s: invalid lane configuration %d\n",
__func__, ret);
ret = -EINVAL;
goto err;

@ -1757,7 +1757,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
/* Get panel node from the output port's endpoint data */
device_node = of_graph_get_remote_node(np, 1, 0);
if (!device_node) {
dev_dbg(dev, "%s: no valid device\n", __func__);
DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__);
ret = -ENODEV;
goto err;
}

@ -1768,7 +1768,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
"syscon-sfpb");
if (IS_ERR(msm_host->sfpb)) {
dev_err(dev, "%s: failed to get sfpb regmap\n",
DRM_DEV_ERROR(dev, "%s: failed to get sfpb regmap\n",
__func__);
ret = PTR_ERR(msm_host->sfpb);
}
@ -1918,7 +1918,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (msm_host->irq < 0) {
ret = msm_host->irq;
dev_err(dev->dev, "failed to get irq: %d\n", ret);
DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
return ret;
}

@ -1926,7 +1926,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"dsi_isr", msm_host);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
DRM_DEV_ERROR(&pdev->dev, "failed to request IRQ%u: %d\n",
msm_host->irq, ret);
return ret;
}
@ -404,7 +404,7 @@ static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
ret = devm_regulator_bulk_get(dev, num, s);
if (ret < 0) {
dev_err(dev, "%s: failed to init regulator, ret=%d\n",
DRM_DEV_ERROR(dev, "%s: failed to init regulator, ret=%d\n",
__func__, ret);
return ret;
}

@ -441,7 +441,7 @@ static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
ret = regulator_set_load(s[i].consumer,
regs[i].enable_load);
if (ret < 0) {
dev_err(dev,
DRM_DEV_ERROR(dev,
"regulator %d set op mode failed, %d\n",
i, ret);
goto fail;

@ -451,7 +451,7 @@ static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
ret = regulator_bulk_enable(num, s);
if (ret < 0) {
dev_err(dev, "regulator enable failed, %d\n", ret);
DRM_DEV_ERROR(dev, "regulator enable failed, %d\n", ret);
goto fail;
}

@ -472,7 +472,7 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
ret = clk_prepare_enable(phy->ahb_clk);
if (ret) {
dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
pm_runtime_put_sync(dev);
}
@ -543,7 +543,7 @@ int msm_dsi_phy_init_common(struct msm_dsi_phy *phy)
phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
"DSI_PHY_REG");
if (IS_ERR(phy->reg_base)) {
dev_err(&pdev->dev, "%s: failed to map phy regulator base\n",
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n",
__func__);
ret = -ENOMEM;
goto fail;

@ -574,7 +574,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->id = dsi_phy_get_id(phy);
if (phy->id < 0) {
ret = phy->id;
dev_err(dev, "%s: couldn't identify PHY index, %d\n",
DRM_DEV_ERROR(dev, "%s: couldn't identify PHY index, %d\n",
__func__, ret);
goto fail;
}

@ -584,20 +584,20 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR(phy->base)) {
dev_err(dev, "%s: failed to map phy base\n", __func__);
DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
ret = -ENOMEM;
goto fail;
}

ret = dsi_phy_regulator_init(phy);
if (ret) {
dev_err(dev, "%s: failed to init regulator\n", __func__);
DRM_DEV_ERROR(dev, "%s: failed to init regulator\n", __func__);
goto fail;
}

phy->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(phy->ahb_clk)) {
dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
DRM_DEV_ERROR(dev, "%s: Unable to get ahb clk\n", __func__);
ret = PTR_ERR(phy->ahb_clk);
goto fail;
}

@ -617,7 +617,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
if (IS_ERR_OR_NULL(phy->pll))
dev_info(dev,
DRM_DEV_INFO(dev,
"%s: pll init failed: %ld, need separate pll clk driver\n",
__func__, PTR_ERR(phy->pll));
@ -675,21 +675,21 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
ret = dsi_phy_enable_resource(phy);
if (ret) {
dev_err(dev, "%s: resource enable failed, %d\n",
DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
__func__, ret);
goto res_en_fail;
}

ret = dsi_phy_regulator_enable(phy);
if (ret) {
dev_err(dev, "%s: regulator enable failed, %d\n",
DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
__func__, ret);
goto reg_en_fail;
}

ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req);
if (ret) {
dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
goto phy_en_fail;
}

@ -702,7 +702,7 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
if (phy->usecase != MSM_DSI_PHY_SLAVE) {
ret = msm_dsi_pll_restore_state(phy->pll);
if (ret) {
dev_err(dev, "%s: failed to restore pll state, %d\n",
DRM_DEV_ERROR(dev, "%s: failed to restore pll state, %d\n",
__func__, ret);
goto pll_restor_fail;
}
@ -93,7 +93,7 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");

if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
dev_err(&phy->pdev->dev,
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}

@ -172,7 +172,7 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
if (ret) {
dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret);
return ret;
}

@ -196,7 +196,7 @@ static int dsi_10nm_phy_init(struct msm_dsi_phy *phy)
phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
"DSI_PHY_LANE");
if (IS_ERR(phy->lane_base)) {
dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
__func__);
return -ENOMEM;
}

@ -64,7 +64,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
void __iomem *lane_base = phy->lane_base;

if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
dev_err(&phy->pdev->dev,
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}

@ -115,7 +115,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
if (ret) {
dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret);
return ret;
}

@ -142,7 +142,7 @@ static int dsi_14nm_phy_init(struct msm_dsi_phy *phy)
phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
"DSI_PHY_LANE");
if (IS_ERR(phy->lane_base)) {
dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
__func__);
return -ENOMEM;
}

@ -82,7 +82,7 @@ static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");

if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
dev_err(&phy->pdev->dev,
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}

@ -76,7 +76,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");

if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
dev_err(&phy->pdev->dev,
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}

@ -132,7 +132,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");

if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
dev_err(&phy->pdev->dev,
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
@ -175,7 +175,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
|
||||
}
|
||||
|
||||
if (IS_ERR(pll)) {
|
||||
dev_err(dev, "%s: failed to init DSI PLL\n", __func__);
|
||||
DRM_DEV_ERROR(dev, "%s: failed to init DSI PLL\n", __func__);
|
||||
return pll;
|
||||
}
|
||||
|
||||
|
@ -760,7 +760,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
|
||||
ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
|
||||
pll_10nm->hw_data);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to register clk provider: %d\n", ret);
|
||||
DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -788,13 +788,13 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
|
||||
|
||||
pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
|
||||
if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) {
|
||||
dev_err(&pdev->dev, "failed to map CMN PHY base\n");
|
||||
DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
|
||||
if (IS_ERR_OR_NULL(pll_10nm->mmio)) {
|
||||
dev_err(&pdev->dev, "failed to map PLL base\n");
|
||||
DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
@ -813,7 +813,7 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
|
||||
|
||||
ret = pll_10nm_register(pll_10nm);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
|
||||
DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
|
@ -783,7 +783,7 @@ static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll)
|
||||
POLL_TIMEOUT_US);
|
||||
|
||||
if (unlikely(!locked))
|
||||
dev_err(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
|
||||
DRM_DEV_ERROR(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
|
||||
else
|
||||
DBG("DSI PLL lock success");
|
||||
|
||||
@ -829,7 +829,7 @@ static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll)
|
||||
ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw,
|
||||
cached_state->vco_rate, 0);
|
||||
if (ret) {
|
||||
dev_err(&pll_14nm->pdev->dev,
|
||||
DRM_DEV_ERROR(&pll_14nm->pdev->dev,
|
||||
"restore vco rate failed. ret=%d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
@ -1039,7 +1039,7 @@ static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm)
|
||||
ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
|
||||
pll_14nm->hw_data);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to register clk provider: %d\n", ret);
|
||||
DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1067,13 +1067,13 @@ struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
|
||||
|
||||
pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
|
||||
if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) {
|
||||
dev_err(&pdev->dev, "failed to map CMN PHY base\n");
|
||||
DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
|
||||
if (IS_ERR_OR_NULL(pll_14nm->mmio)) {
|
||||
dev_err(&pdev->dev, "failed to map PLL base\n");
|
||||
DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
@ -1096,7 +1096,7 @@ struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
|
||||
|
||||
ret = pll_14nm_register(pll_14nm);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
|
||||
DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
|
@ -156,7 +156,7 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
|
||||
if (rate <= lpfr_lut[i].vco_rate)
|
||||
break;
|
||||
if (i == LPFR_LUT_SIZE) {
|
||||
dev_err(dev, "unable to get loop filter resistance. vco=%lu\n",
|
||||
DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
|
||||
rate);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -386,7 +386,7 @@ static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll)
|
||||
}
|
||||
|
||||
if (unlikely(!locked))
|
||||
dev_err(dev, "DSI PLL lock failed\n");
|
||||
DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
|
||||
else
|
||||
DBG("DSI PLL Lock success");
|
||||
|
||||
@ -429,7 +429,7 @@ static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll)
|
||||
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
|
||||
|
||||
if (unlikely(!locked))
|
||||
dev_err(dev, "DSI PLL lock failed\n");
|
||||
DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
|
||||
else
|
||||
DBG("DSI PLL lock success");
|
||||
|
||||
@ -468,7 +468,7 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
|
||||
ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
|
||||
cached_state->vco_rate, 0);
|
||||
if (ret) {
|
||||
dev_err(&pll_28nm->pdev->dev,
|
||||
DRM_DEV_ERROR(&pll_28nm->pdev->dev,
|
||||
"restore vco rate failed. ret=%d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
@ -581,7 +581,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
|
||||
ret = of_clk_add_provider(dev->of_node,
|
||||
of_clk_src_onecell_get, &pll_28nm->clk_data);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to register clk provider: %d\n", ret);
|
||||
DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -607,7 +607,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
|
||||
|
||||
pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
|
||||
if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
|
||||
dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
|
||||
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
@ -633,13 +633,13 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
|
||||
pll->en_seq_cnt = 1;
|
||||
pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;
|
||||
} else {
|
||||
dev_err(&pdev->dev, "phy type (%d) is not 28nm\n", type);
|
||||
DRM_DEV_ERROR(&pdev->dev, "phy type (%d) is not 28nm\n", type);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
ret = pll_28nm_register(pll_28nm);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
|
||||
DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
|
@ -327,7 +327,7 @@ static int dsi_pll_28nm_enable_seq(struct msm_dsi_pll *pll)
|
||||
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
|
||||
|
||||
if (unlikely(!locked))
|
||||
dev_err(dev, "DSI PLL lock failed\n");
|
||||
DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
|
||||
else
|
||||
DBG("DSI PLL lock success");
|
||||
|
||||
@ -368,7 +368,7 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
|
||||
ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
|
||||
cached_state->vco_rate, 0);
|
||||
if (ret) {
|
||||
dev_err(&pll_28nm->pdev->dev,
|
||||
DRM_DEV_ERROR(&pll_28nm->pdev->dev,
|
||||
"restore vco rate failed. ret=%d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
@ -482,7 +482,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
|
||||
ret = of_clk_add_provider(dev->of_node,
|
||||
of_clk_src_onecell_get, &pll_28nm->clk_data);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to register clk provider: %d\n", ret);
|
||||
DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -508,7 +508,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
|
||||
|
||||
pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
|
||||
if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
|
||||
dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
|
||||
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
@ -526,7 +526,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
|
||||
|
||||
ret = pll_28nm_register(pll_28nm);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
|
||||
DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
|
@ -157,7 +157,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
|
||||
edp->bridge = msm_edp_bridge_init(edp);
|
||||
if (IS_ERR(edp->bridge)) {
|
||||
ret = PTR_ERR(edp->bridge);
|
||||
dev_err(dev->dev, "failed to create eDP bridge: %d\n", ret);
|
||||
DRM_DEV_ERROR(dev->dev, "failed to create eDP bridge: %d\n", ret);
|
||||
edp->bridge = NULL;
|
||||
goto fail;
|
||||
}
|
||||
@ -165,7 +165,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
|
||||
edp->connector = msm_edp_connector_init(edp);
|
||||
if (IS_ERR(edp->connector)) {
|
||||
ret = PTR_ERR(edp->connector);
|
||||
dev_err(dev->dev, "failed to create eDP connector: %d\n", ret);
|
||||
DRM_DEV_ERROR(dev->dev, "failed to create eDP connector: %d\n", ret);
|
||||
edp->connector = NULL;
|
||||
goto fail;
|
||||
}
|
||||
@ -173,7 +173,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
|
||||
edp->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
|
||||
if (edp->irq < 0) {
|
||||
ret = edp->irq;
|
||||
dev_err(dev->dev, "failed to get IRQ: %d\n", ret);
|
||||
DRM_DEV_ERROR(dev->dev, "failed to get IRQ: %d\n", ret);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
@ -181,7 +181,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
|
||||
edp_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
|
||||
"edp_isr", edp);
|
||||
if (ret < 0) {
|
||||
dev_err(dev->dev, "failed to request IRQ%u: %d\n",
|
||||
DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
|
||||
edp->irq, ret);
|
||||
goto fail;
|
||||
}
|
||||
|
@ -98,7 +98,7 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
|
||||
|
||||
phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
|
||||
if (!phy_node) {
|
||||
dev_err(&pdev->dev, "cannot find phy device\n");
|
||||
DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
@ -109,7 +109,7 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
|
||||
of_node_put(phy_node);
|
||||
|
||||
if (!phy_pdev || !hdmi->phy) {
|
||||
dev_err(&pdev->dev, "phy driver is not ready\n");
|
||||
DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
|
||||
return -EPROBE_DEFER;
|
||||
}
|
||||
|
||||
@ -153,7 +153,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
|
||||
hdmi->qfprom_mmio = msm_ioremap(pdev,
|
||||
config->qfprom_mmio_name, "HDMI_QFPROM");
|
||||
if (IS_ERR(hdmi->qfprom_mmio)) {
|
||||
dev_info(&pdev->dev, "can't find qfprom resource\n");
|
||||
DRM_DEV_INFO(&pdev->dev, "can't find qfprom resource\n");
|
||||
hdmi->qfprom_mmio = NULL;
|
||||
}
|
||||
|
||||
@ -172,7 +172,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
|
||||
config->hpd_reg_names[i]);
|
||||
if (IS_ERR(reg)) {
|
||||
ret = PTR_ERR(reg);
|
||||
dev_err(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
|
||||
DRM_DEV_ERROR(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
|
||||
config->hpd_reg_names[i], ret);
|
||||
goto fail;
|
||||
}
|
||||
@ -195,7 +195,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
|
||||
config->pwr_reg_names[i]);
|
||||
if (IS_ERR(reg)) {
|
||||
ret = PTR_ERR(reg);
|
||||
dev_err(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
|
||||
DRM_DEV_ERROR(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
|
||||
config->pwr_reg_names[i], ret);
|
||||
goto fail;
|
||||
}
|
||||
@ -217,7 +217,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
|
||||
clk = msm_clk_get(pdev, config->hpd_clk_names[i]);
|
||||
if (IS_ERR(clk)) {
|
||||
ret = PTR_ERR(clk);
|
||||
dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
|
||||
DRM_DEV_ERROR(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
|
||||
config->hpd_clk_names[i], ret);
|
||||
goto fail;
|
||||
}
|
||||
@ -239,7 +239,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
|
||||
clk = msm_clk_get(pdev, config->pwr_clk_names[i]);
|
||||
if (IS_ERR(clk)) {
|
||||
ret = PTR_ERR(clk);
|
||||
dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
|
||||
DRM_DEV_ERROR(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
|
||||
config->pwr_clk_names[i], ret);
|
||||
goto fail;
|
||||
}
|
||||
@ -254,14 +254,14 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
|
||||
hdmi->i2c = msm_hdmi_i2c_init(hdmi);
|
||||
if (IS_ERR(hdmi->i2c)) {
|
||||
ret = PTR_ERR(hdmi->i2c);
|
||||
dev_err(&pdev->dev, "failed to get i2c: %d\n", ret);
|
||||
DRM_DEV_ERROR(&pdev->dev, "failed to get i2c: %d\n", ret);
|
||||
hdmi->i2c = NULL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ret = msm_hdmi_get_phy(hdmi);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "failed to get phy\n");
|
||||
DRM_DEV_ERROR(&pdev->dev, "failed to get phy\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
@ -303,7 +303,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
|
||||
hdmi->bridge = msm_hdmi_bridge_init(hdmi);
|
||||
if (IS_ERR(hdmi->bridge)) {
|
||||
ret = PTR_ERR(hdmi->bridge);
|
||||
dev_err(dev->dev, "failed to create HDMI bridge: %d\n", ret);
|
||||
DRM_DEV_ERROR(dev->dev, "failed to create HDMI bridge: %d\n", ret);
|
||||
hdmi->bridge = NULL;
|
||||
goto fail;
|
||||
}
|
||||
@ -311,7 +311,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
|
||||
hdmi->connector = msm_hdmi_connector_init(hdmi);
|
||||
if (IS_ERR(hdmi->connector)) {
|
||||
ret = PTR_ERR(hdmi->connector);
|
||||
dev_err(dev->dev, "failed to create HDMI connector: %d\n", ret);
|
||||
DRM_DEV_ERROR(dev->dev, "failed to create HDMI connector: %d\n", ret);
|
||||
hdmi->connector = NULL;
|
||||
goto fail;
|
||||
}
|
||||
@ -319,7 +319,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
|
||||
hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
|
||||
if (hdmi->irq < 0) {
|
||||
ret = hdmi->irq;
|
||||
dev_err(dev->dev, "failed to get irq: %d\n", ret);
|
||||
DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
@ -327,7 +327,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
|
||||
msm_hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
|
||||
"hdmi_isr", hdmi);
|
||||
if (ret < 0) {
|
||||
dev_err(dev->dev, "failed to request IRQ%u: %d\n",
|
||||
DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
|
||||
hdmi->irq, ret);
|
||||
goto fail;
|
||||
}
|
||||
@ -476,7 +476,7 @@ static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
|
||||
unsigned int level_shift = 0; /* 0dB */
|
||||
bool down_mix = false;
|
||||
|
||||
dev_dbg(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
|
||||
DRM_DEV_DEBUG(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
|
||||
params->sample_width, params->cea.channels);
|
||||
|
||||
switch (params->cea.channels) {
|
||||
@ -527,7 +527,7 @@ static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
|
||||
rate = HDMI_SAMPLE_RATE_192KHZ;
|
||||
break;
|
||||
default:
|
||||
dev_err(dev, "rate[%d] not supported!\n",
|
||||
DRM_DEV_ERROR(dev, "rate[%d] not supported!\n",
|
||||
params->sample_rate);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -579,7 +579,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
|
||||
hdmi_cfg = (struct hdmi_platform_config *)
|
||||
of_device_get_match_data(dev);
|
||||
if (!hdmi_cfg) {
|
||||
dev_err(dev, "unknown hdmi_cfg: %pOFn\n", of_node);
|
||||
DRM_DEV_ERROR(dev, "unknown hdmi_cfg: %pOFn\n", of_node);
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
|
@ -40,7 +40,7 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
|
||||
for (i = 0; i < config->pwr_reg_cnt; i++) {
|
||||
ret = regulator_enable(hdmi->pwr_regs[i]);
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
|
||||
DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
|
||||
config->pwr_reg_names[i], ret);
|
||||
}
|
||||
}
|
||||
@ -49,7 +49,7 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
|
||||
DBG("pixclock: %lu", hdmi->pixclock);
|
||||
ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n",
|
||||
DRM_DEV_ERROR(dev->dev, "failed to set pixel clk: %s (%d)\n",
|
||||
config->pwr_clk_names[0], ret);
|
||||
}
|
||||
}
|
||||
@ -57,7 +57,7 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
|
||||
for (i = 0; i < config->pwr_clk_cnt; i++) {
|
||||
ret = clk_prepare_enable(hdmi->pwr_clks[i]);
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "failed to enable pwr clk: %s (%d)\n",
|
||||
DRM_DEV_ERROR(dev->dev, "failed to enable pwr clk: %s (%d)\n",
|
||||
config->pwr_clk_names[i], ret);
|
||||
}
|
||||
}
|
||||
@ -82,7 +82,7 @@ static void power_off(struct drm_bridge *bridge)
|
||||
for (i = 0; i < config->pwr_reg_cnt; i++) {
|
||||
ret = regulator_disable(hdmi->pwr_regs[i]);
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
|
||||
DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
|
||||
config->pwr_reg_names[i], ret);
|
||||
}
|
||||
}
|
||||
@ -105,7 +105,7 @@ static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
|
||||
|
||||
len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
|
||||
if (len < 0) {
|
||||
dev_err(&hdmi->pdev->dev,
|
||||
DRM_DEV_ERROR(&hdmi->pdev->dev,
|
||||
"failed to configure avi infoframe\n");
|
||||
return;
|
||||
}
|
||||
|
@ -90,7 +90,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
|
||||
if (gpio.num != -1) {
|
||||
ret = gpio_request(gpio.num, gpio.label);
|
||||
if (ret) {
|
||||
dev_err(dev,
|
||||
DRM_DEV_ERROR(dev,
|
||||
"'%s'(%d) gpio_request failed: %d\n",
|
||||
gpio.label, gpio.num, ret);
|
||||
goto err;
|
||||
@ -156,7 +156,7 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
|
||||
|
||||
ret = clk_prepare_enable(hdmi->hpd_clks[i]);
|
||||
if (ret) {
|
||||
dev_err(dev,
|
||||
DRM_DEV_ERROR(dev,
|
||||
"failed to enable hpd clk: %s (%d)\n",
|
||||
config->hpd_clk_names[i], ret);
|
||||
}
|
||||
@ -179,7 +179,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
|
||||
for (i = 0; i < config->hpd_reg_cnt; i++) {
|
||||
ret = regulator_enable(hdmi->hpd_regs[i]);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to enable hpd regulator: %s (%d)\n",
|
||||
DRM_DEV_ERROR(dev, "failed to enable hpd regulator: %s (%d)\n",
|
||||
config->hpd_reg_names[i], ret);
|
||||
goto fail;
|
||||
}
|
||||
@ -187,13 +187,13 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
|
||||
|
||||
ret = pinctrl_pm_select_default_state(dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "pinctrl state chg failed: %d\n", ret);
|
||||
DRM_DEV_ERROR(dev, "pinctrl state chg failed: %d\n", ret);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ret = gpio_config(hdmi, true);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to configure GPIOs: %d\n", ret);
|
||||
DRM_DEV_ERROR(dev, "failed to configure GPIOs: %d\n", ret);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
|
@ -66,7 +66,7 @@ static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c)
|
||||
} while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
|
||||
|
||||
if (!retry) {
|
||||
dev_err(dev->dev, "timeout waiting for DDC\n");
|
||||
DRM_DEV_ERROR(dev->dev, "timeout waiting for DDC\n");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
|
@ -37,7 +37,7 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
|
||||
reg = devm_regulator_get(dev, cfg->reg_names[i]);
|
||||
if (IS_ERR(reg)) {
|
||||
ret = PTR_ERR(reg);
|
||||
dev_err(dev, "failed to get phy regulator: %s (%d)\n",
|
||||
DRM_DEV_ERROR(dev, "failed to get phy regulator: %s (%d)\n",
|
||||
cfg->reg_names[i], ret);
|
||||
return ret;
|
||||
}
|
||||
@ -51,7 +51,7 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
|
||||
clk = msm_clk_get(phy->pdev, cfg->clk_names[i]);
|
||||
if (IS_ERR(clk)) {
|
||||
ret = PTR_ERR(clk);
|
||||
dev_err(dev, "failed to get phy clock: %s (%d)\n",
|
||||
DRM_DEV_ERROR(dev, "failed to get phy clock: %s (%d)\n",
|
||||
cfg->clk_names[i], ret);
|
||||
return ret;
|
||||
}
|
||||
@ -73,14 +73,14 @@ int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy)
|
||||
for (i = 0; i < cfg->num_regs; i++) {
|
||||
ret = regulator_enable(phy->regs[i]);
|
||||
if (ret)
|
||||
dev_err(dev, "failed to enable regulator: %s (%d)\n",
|
||||
DRM_DEV_ERROR(dev, "failed to enable regulator: %s (%d)\n",
|
||||
cfg->reg_names[i], ret);
|
||||
}
|
||||
|
||||
for (i = 0; i < cfg->num_clks; i++) {
|
||||
ret = clk_prepare_enable(phy->clks[i]);
|
||||
if (ret)
|
||||
dev_err(dev, "failed to enable clock: %s (%d)\n",
|
||||
DRM_DEV_ERROR(dev, "failed to enable clock: %s (%d)\n",
|
||||
cfg->clk_names[i], ret);
|
||||
}
|
||||
|
||||
@ -159,7 +159,7 @@ static int msm_hdmi_phy_probe(struct platform_device *pdev)
|
||||
|
||||
phy->mmio = msm_ioremap(pdev, "hdmi_phy", "HDMI_PHY");
|
||||
if (IS_ERR(phy->mmio)) {
|
||||
dev_err(dev, "%s: failed to map phy base\n", __func__);
|
||||
DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@ -177,7 +177,7 @@ static int msm_hdmi_phy_probe(struct platform_device *pdev)
|
||||
|
||||
ret = msm_hdmi_phy_pll_init(pdev, phy->cfg->type);
|
||||
if (ret) {
|
||||
dev_err(dev, "couldn't init PLL\n");
|
||||
DRM_DEV_ERROR(dev, "couldn't init PLL\n");
|
||||
msm_hdmi_phy_resource_disable(phy);
|
||||
return ret;
|
||||
}
|
||||
|
@ -725,7 +725,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
|
||||
|
||||
pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
|
||||
if (IS_ERR(pll->mmio_qserdes_com)) {
|
||||
dev_err(dev, "failed to map pll base\n");
|
||||
DRM_DEV_ERROR(dev, "failed to map pll base\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@ -737,7 +737,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
|
||||
|
||||
pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name, label);
|
||||
if (IS_ERR(pll->mmio_qserdes_tx[i])) {
|
||||
dev_err(dev, "failed to map pll base\n");
|
||||
DRM_DEV_ERROR(dev, "failed to map pll base\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
@ -745,7 +745,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
|
||||
|
||||
clk = devm_clk_register(dev, &pll->clk_hw);
|
||||
if (IS_ERR(clk)) {
|
||||
dev_err(dev, "failed to register pll clock\n");
|
||||
DRM_DEV_ERROR(dev, "failed to register pll clock\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -445,7 +445,7 @@ int msm_hdmi_pll_8960_init(struct platform_device *pdev)
|
||||
|
||||
pll->mmio = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
|
||||
if (IS_ERR(pll->mmio)) {
|
||||
dev_err(dev, "failed to map pll base\n");
|
||||
DRM_DEV_ERROR(dev, "failed to map pll base\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@ -454,7 +454,7 @@ int msm_hdmi_pll_8960_init(struct platform_device *pdev)
|
||||
|
||||
clk = devm_clk_register(dev, &pll->clk_hw);
|
||||
if (IS_ERR(clk)) {
|
||||
dev_err(dev, "failed to register pll clock\n");
|
||||
DRM_DEV_ERROR(dev, "failed to register pll clock\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -194,13 +194,13 @@ static int late_init_minor(struct drm_minor *minor)
|
||||
|
||||
ret = msm_rd_debugfs_init(minor);
|
||||
if (ret) {
|
||||
dev_err(minor->dev->dev, "could not install rd debugfs\n");
|
||||
DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = msm_perf_debugfs_init(minor);
|
||||
if (ret) {
|
||||
dev_err(minor->dev->dev, "could not install perf debugfs\n");
|
||||
DRM_DEV_ERROR(minor->dev->dev, "could not install perf debugfs\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -228,7 +228,7 @@ int msm_debugfs_init(struct drm_minor *minor)
|
||||
minor->debugfs_root, minor);
|
||||
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "could not install msm_debugfs_list\n");
|
||||
DRM_DEV_ERROR(dev->dev, "could not install msm_debugfs_list\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -170,7 +170,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
|
||||
if (!res) {
|
||||
dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
|
||||
DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
@ -178,7 +178,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
|
||||
|
||||
ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
|
||||
if (!ptr) {
|
||||
dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
|
||||
DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
@ -419,12 +419,12 @@ static int msm_init_vram(struct drm_device *dev)
|
||||
p = dma_alloc_attrs(dev->dev, size,
|
||||
&priv->vram.paddr, GFP_KERNEL, attrs);
|
||||
if (!p) {
|
||||
dev_err(dev->dev, "failed to allocate VRAM\n");
|
||||
DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
|
||||
priv->vram.paddr = 0;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dev_info(dev->dev, "VRAM: %08x->%08x\n",
|
||||
DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
|
||||
(uint32_t)priv->vram.paddr,
|
||||
(uint32_t)(priv->vram.paddr + size));
|
||||
}
|
||||
@ -444,7 +444,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
|
||||
|
||||
ddev = drm_dev_alloc(drv, dev);
|
||||
if (IS_ERR(ddev)) {
|
||||
dev_err(dev, "failed to allocate drm_device\n");
|
||||
DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
|
||||
return PTR_ERR(ddev);
|
||||
}
|
||||
|
||||
@ -519,7 +519,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
|
||||
* and (for example) use dmabuf/prime to share buffers with
|
||||
* imx drm driver on iMX5
|
||||
*/
|
||||
dev_err(dev, "failed to load kms\n");
|
||||
DRM_DEV_ERROR(dev, "failed to load kms\n");
|
||||
ret = PTR_ERR(kms);
|
||||
goto err_msm_uninit;
|
||||
}
|
||||
@ -530,7 +530,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
|
||||
if (kms) {
|
||||
ret = kms->funcs->hw_init(kms);
|
||||
if (ret) {
|
||||
dev_err(dev, "kms hw init failed: %d\n", ret);
|
||||
DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
|
||||
goto err_msm_uninit;
|
||||
}
|
||||
}
|
||||
@ -561,7 +561,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
|
||||
ret);
|
||||
|
||||
if (IS_ERR(priv->disp_thread[i].thread)) {
|
||||
dev_err(dev, "failed to create crtc_commit kthread\n");
|
||||
DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n");
|
||||
priv->disp_thread[i].thread = NULL;
|
||||
}
|
||||
|
||||
@ -573,6 +573,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
|
||||
kthread_run(kthread_worker_fn,
|
||||
&priv->event_thread[i].worker,
|
||||
"crtc_event:%d", priv->event_thread[i].crtc_id);
|
||||
|
||||
/**
|
||||
* event thread should also run at same priority as disp_thread
|
||||
* because it is handling frame_done events. A lower priority
|
||||
@ -613,7 +614,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
|
||||
|
||||
ret = drm_vblank_init(ddev, priv->num_crtcs);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "failed to initialize vblank\n");
|
||||
DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
|
||||
goto err_msm_uninit;
|
||||
}
|
||||
|
||||
@ -622,7 +623,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
|
||||
ret = drm_irq_install(ddev, kms->irq);
|
||||
pm_runtime_put_sync(dev);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "failed to install IRQ handler\n");
|
||||
DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
|
||||
goto err_msm_uninit;
|
||||
}
|
||||
}
|
||||
@ -1182,7 +1183,7 @@ static int add_components_mdp(struct device *mdp_dev,
|
||||
|
||||
ret = of_graph_parse_endpoint(ep_node, &ep);
|
||||
if (ret) {
|
||||
dev_err(mdp_dev, "unable to parse port endpoint\n");
|
||||
DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n");
|
||||
of_node_put(ep_node);
|
||||
return ret;
|
||||
}
|
||||
@ -1233,13 +1234,13 @@ static int add_display_components(struct device *dev,
|
||||
of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
|
||||
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to populate children devices\n");
|
||||
DRM_DEV_ERROR(dev, "failed to populate children devices\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
|
||||
if (!mdp_dev) {
|
||||
dev_err(dev, "failed to find MDSS MDP node\n");
|
||||
DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
|
||||
of_platform_depopulate(dev);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
@ -154,7 +154,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
|
||||
format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
|
||||
mode_cmd->modifier[0]);
|
||||
if (!format) {
|
||||
dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
|
||||
DRM_DEV_ERROR(dev->dev, "unsupported pixel format: %4.4s\n",
|
||||
(char *)&mode_cmd->pixel_format);
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
@ -196,7 +196,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
|
||||
|
||||
ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
|
||||
DRM_DEV_ERROR(dev->dev, "framebuffer init failed: %d\n", ret);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
@ -233,13 +233,13 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format
|
||||
bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
|
||||
}
|
||||
if (IS_ERR(bo)) {
|
||||
dev_err(dev->dev, "failed to allocate buffer object\n");
|
||||
DRM_DEV_ERROR(dev->dev, "failed to allocate buffer object\n");
|
||||
return ERR_CAST(bo);
|
||||
}
|
||||
|
||||
fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
|
||||
if (IS_ERR(fb)) {
|
||||
dev_err(dev->dev, "failed to allocate fb\n");
|
||||
DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
|
||||
/* note: if fb creation failed, we can't rely on fb destroy
|
||||
* to unref the bo:
|
||||
*/
|
||||
|
@ -91,7 +91,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
|
||||
sizes->surface_height, pitch, format);
|
||||
|
||||
if (IS_ERR(fb)) {
|
||||
dev_err(dev->dev, "failed to allocate fb\n");
|
||||
DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
|
||||
return PTR_ERR(fb);
|
||||
}
|
||||
|
||||
@ -106,13 +106,13 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
|
||||
*/
|
||||
ret = msm_gem_get_iova(bo, priv->kms->aspace, &paddr);
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
|
||||
DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
|
||||
goto fail_unlock;
|
||||
}
|
||||
|
||||
fbi = drm_fb_helper_alloc_fbi(helper);
|
||||
if (IS_ERR(fbi)) {
|
||||
dev_err(dev->dev, "failed to allocate fb info\n");
|
||||
DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
|
||||
ret = PTR_ERR(fbi);
|
||||
goto fail_unlock;
|
||||
}
|
||||
@ -176,7 +176,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
|
||||
|
||||
ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
|
||||
DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
|
@ -88,7 +88,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
|
||||
p = get_pages_vram(obj, npages);
|
||||
|
||||
if (IS_ERR(p)) {
|
||||
dev_err(dev->dev, "could not get pages: %ld\n",
|
||||
DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
|
||||
PTR_ERR(p));
|
||||
return p;
|
||||
}
|
||||
@ -99,7 +99,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
|
||||
if (IS_ERR(msm_obj->sgt)) {
|
||||
void *ptr = ERR_CAST(msm_obj->sgt);
|
||||
|
||||
dev_err(dev->dev, "failed to allocate sgt\n");
|
||||
DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
|
||||
msm_obj->sgt = NULL;
|
||||
return ptr;
|
||||
}
|
||||
@ -280,7 +280,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
|
||||
ret = drm_gem_create_mmap_offset(obj);
|
||||
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "could not allocate mmap offset\n");
|
||||
DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -473,7 +473,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
|
||||
mutex_lock(&msm_obj->lock);
|
||||
|
||||
if (WARN_ON(msm_obj->madv > madv)) {
|
||||
dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
|
||||
DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
|
||||
msm_obj->madv, madv);
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
return ERR_PTR(-EBUSY);
|
||||
@ -864,7 +864,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
|
||||
case MSM_BO_WC:
|
||||
break;
|
||||
default:
|
||||
dev_err(dev->dev, "invalid cache flag: %x\n",
|
||||
DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
|
||||
(flags & MSM_BO_CACHE_MASK));
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -990,7 +990,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
|
||||
|
||||
/* if we don't have IOMMU, don't bother pretending we can import: */
|
||||
if (!iommu_present(&platform_bus_type)) {
|
||||
dev_err(dev->dev, "cannot import without IOMMU\n");
|
||||
DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
|
@ -107,7 +107,7 @@ static void msm_devfreq_init(struct msm_gpu *gpu)
|
||||
&msm_devfreq_profile, "simple_ondemand", NULL);
|
||||
|
||||
if (IS_ERR(gpu->devfreq.devfreq)) {
|
||||
dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
|
||||
DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
|
||||
gpu->devfreq.devfreq = NULL;
|
||||
}
|
||||
|
||||
@ -122,7 +122,7 @@ static int enable_pwrrail(struct msm_gpu *gpu)
|
||||
if (gpu->gpu_reg) {
|
||||
ret = regulator_enable(gpu->gpu_reg);
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
|
||||
DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
@ -130,7 +130,7 @@ static int enable_pwrrail(struct msm_gpu *gpu)
|
||||
if (gpu->gpu_cx) {
|
||||
ret = regulator_enable(gpu->gpu_cx);
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
|
||||
DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
@ -428,7 +428,7 @@ static void recover_worker(struct work_struct *work)
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
||||
dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
|
||||
DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
|
||||
|
||||
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
|
||||
if (submit) {
|
||||
@ -456,7 +456,7 @@ static void recover_worker(struct work_struct *work)
|
||||
rcu_read_unlock();
|
||||
|
||||
if (comm && cmd) {
|
||||
dev_err(dev->dev, "%s: offending task: %s (%s)\n",
|
||||
DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
|
||||
gpu->name, comm, cmd);
|
||||
|
||||
msm_rd_dump_submit(priv->hangrd, submit,
|
||||
@ -539,11 +539,11 @@ static void hangcheck_handler(struct timer_list *t)
} else if (fence < ring->seqno) {
/* no progress and not done.. hung! */
ring->hangcheck_fence = fence;
dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
gpu->name, ring->id);
dev_err(dev->dev, "%s: completed fence: %u\n",
DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
dev_err(dev->dev, "%s: submitted fence: %u\n",
DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
gpu->name, ring->seqno);

queue_work(priv->wq, &gpu->recover_work);

@ -816,11 +816,11 @@ msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
iommu->geometry.aperture_start = va_start;
iommu->geometry.aperture_end = va_end;

dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);

aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
if (IS_ERR(aspace)) {
dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
DRM_DEV_ERROR(gpu->dev->dev, "failed to init iommu: %ld\n",
PTR_ERR(aspace));
iommu_domain_free(iommu);
return ERR_CAST(aspace);

@ -871,14 +871,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->irq = platform_get_irq_byname(pdev, config->irqname);
if (gpu->irq < 0) {
ret = gpu->irq;
dev_err(drm->dev, "failed to get irq: %d\n", ret);
DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
goto fail;
}

ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
IRQF_TRIGGER_HIGH, gpu->name, gpu);
if (ret) {
dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
goto fail;
}

@ -911,7 +911,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
config->va_start, config->va_end);

if (gpu->aspace == NULL)
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
else if (IS_ERR(gpu->aspace)) {
ret = PTR_ERR(gpu->aspace);
goto fail;

@ -923,7 +923,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(memptrs)) {
ret = PTR_ERR(memptrs);
dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
goto fail;
}

@ -939,7 +939,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(gpu->rb[i])) {
ret = PTR_ERR(gpu->rb[i]);
dev_err(drm->dev,
DRM_DEV_ERROR(drm->dev,
"could not create ringbuffer %d: %d\n", i, ret);
goto fail;
}
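For readers skimming the diff, a minimal sketch of the logging pattern this series converts to. This is not part of the commit; the probe function and the error value below are made up for illustration, and only the dev_err()/DRM_DEV_ERROR() calls themselves reflect the change.

#include <linux/device.h>
#include <linux/errno.h>
#include <drm/drm_print.h>

static int example_probe(struct device *dev)
{
	/* plain device log: message is prefixed only with the device name */
	dev_err(dev, "probe failed: %d\n", -ENODEV);

	/* DRM-formatted log: same device pointer, but the message carries
	 * drm-specific formatting, so output from multiple driver instances
	 * is easier to attribute */
	DRM_DEV_ERROR(dev, "probe failed: %d\n", -ENODEV);

	return 0;
}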