Merge tag 'drm-msm-fixes-2022-04-13' of https://gitlab.freedesktop.org/drm/msm into drm-fixes
Some msm fixes for v5.18. kzalloc return checks, display fix, misc locking and scheduler bug, iommu present removal. Signed-off-by: Dave Airlie <airlied@redhat.com> From: Rob Clark <robdclark@gmail.com> Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvuTwx09MKwK68KWXqi4o7LxDGMUz1=Z7xOS+i=OV84Ug@mail.gmail.com
commit 98a71d12d9
Documentation/devicetree/bindings/display/msm
drivers/gpu/drm/msm
diff --git a/Documentation/devicetree/bindings/display/msm/dpu-qcm2290.yaml b/Documentation/devicetree/bindings/display/msm/dpu-qcm2290.yaml
@@ -160,7 +160,7 @@ examples:
     mdss: mdss@5e00000 {
         #address-cells = <1>;
         #size-cells = <1>;
-        compatible = "qcom,qcm2290-mdss", "qcom,mdss";
+        compatible = "qcom,qcm2290-mdss";
         reg = <0x05e00000 0x1000>;
         reg-names = "mdss";
         power-domains = <&dispcc MDSS_GDSC>;
@@ -180,7 +180,7 @@ examples:
                  <&apps_smmu 0x421 0x0>;
         ranges;
 
-        mdss_mdp: mdp@5e01000 {
+        mdss_mdp: display-controller@5e01000 {
             compatible = "qcom,qcm2290-dpu";
             reg = <0x05e01000 0x8f000>,
                   <0x05eb0000 0x2008>;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1742,7 +1742,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu)
 		return ERR_CAST(mmu);
 
 	return msm_gem_address_space_create(mmu,
-		"gpu", 0x100000000ULL, 0x1ffffffffULL);
+		"gpu", 0x100000000ULL, SZ_4G);
 }
 
 static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
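The replaced literal, 0x1ffffffffULL, is the last valid address of a 4 GiB window that starts at 0x100000000; assuming the helper's last two parameters are a start address and a size (as the new call implies), the old code was passing an end address where a size belonged. SZ_4G (0x100000000) states the intended size directly. A standalone sketch of the start/size vs. start/end distinction, with illustrative values only, not driver code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0x100000000ULL;  /* 4 GiB boundary */
	uint64_t size  = 0x100000000ULL;  /* SZ_4G */

	/* a range is [start, start + size) ... */
	printf("range: [%#llx, %#llx)\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + size));

	/* ... and 0x1ffffffffULL is start + size - 1, the last valid
	 * address of that range, which is not the same as its size */
	printf("last address: %#llx\n",
	       (unsigned long long)(start + size - 1));
	return 0;
}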
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -599,43 +599,91 @@ static const struct of_device_id dt_match[] = {
 	{}
 };
 
-#ifdef CONFIG_PM
-static int adreno_resume(struct device *dev)
+static int adreno_runtime_resume(struct device *dev)
 {
 	struct msm_gpu *gpu = dev_to_gpu(dev);
 
 	return gpu->funcs->pm_resume(gpu);
 }
 
-static int active_submits(struct msm_gpu *gpu)
-{
-	int active_submits;
-	mutex_lock(&gpu->active_lock);
-	active_submits = gpu->active_submits;
-	mutex_unlock(&gpu->active_lock);
-	return active_submits;
-}
-
-static int adreno_suspend(struct device *dev)
+static int adreno_runtime_suspend(struct device *dev)
 {
 	struct msm_gpu *gpu = dev_to_gpu(dev);
-	int remaining;
 
-	remaining = wait_event_timeout(gpu->retire_event,
-				       active_submits(gpu) == 0,
-				       msecs_to_jiffies(1000));
-	if (remaining == 0) {
-		dev_err(dev, "Timeout waiting for GPU to suspend\n");
-		return -EBUSY;
-	}
+	/*
+	 * We should be holding a runpm ref, which will prevent
+	 * runtime suspend.  In the system suspend path, we've
+	 * already waited for active jobs to complete.
+	 */
+	WARN_ON_ONCE(gpu->active_submits);
 
 	return gpu->funcs->pm_suspend(gpu);
 }
-#endif
+
+static void suspend_scheduler(struct msm_gpu *gpu)
+{
+	int i;
+
+	/*
+	 * Shut down the scheduler before we force suspend, so that
+	 * suspend isn't racing with scheduler kthread feeding us
+	 * more work.
+	 *
+	 * Note, we just want to park the thread, and let any jobs
+	 * that are already on the hw queue complete normally, as
+	 * opposed to the drm_sched_stop() path used for handling
+	 * faulting/timed-out jobs.  We can't really cancel any jobs
+	 * already on the hw queue without racing with the GPU.
+	 */
+	for (i = 0; i < gpu->nr_rings; i++) {
+		struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+		kthread_park(sched->thread);
+	}
+}
+
+static void resume_scheduler(struct msm_gpu *gpu)
+{
+	int i;
+
+	for (i = 0; i < gpu->nr_rings; i++) {
+		struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+		kthread_unpark(sched->thread);
+	}
+}
+
+static int adreno_system_suspend(struct device *dev)
+{
+	struct msm_gpu *gpu = dev_to_gpu(dev);
+	int remaining, ret;
+
+	suspend_scheduler(gpu);
+
+	remaining = wait_event_timeout(gpu->retire_event,
+				       gpu->active_submits == 0,
+				       msecs_to_jiffies(1000));
+	if (remaining == 0) {
+		dev_err(dev, "Timeout waiting for GPU to suspend\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ret = pm_runtime_force_suspend(dev);
+out:
+	if (ret)
+		resume_scheduler(gpu);
+
+	return ret;
+}
+
+static int adreno_system_resume(struct device *dev)
+{
+	resume_scheduler(dev_to_gpu(dev));
+	return pm_runtime_force_resume(dev);
+}
 
 static const struct dev_pm_ops adreno_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
-	SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
+	SYSTEM_SLEEP_PM_OPS(adreno_system_suspend, adreno_system_resume)
+	RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
 };
 
 static struct platform_driver adreno_driver = {
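Two of the advertised fixes meet in this hunk: the runtime PM callbacks are renamed to make room for dedicated system-sleep handlers that park the DRM scheduler kthreads before forcing suspend, and the removed active_submits() helper, which took a mutex inside a wait_event_timeout() condition, gives way to a plain read of gpu->active_submits. A wait_event condition is evaluated with the waiter in a sleeping state, so it must not itself sleep. A hedged sketch of the corrected pattern, with hypothetical names rather than the driver's own:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/atomic.h>

/* Drain helper: wait up to a second for in-flight work to hit zero. */
static int drain_inflight(wait_queue_head_t *wq, atomic_t *inflight)
{
	long remaining;

	/* The condition is a simple non-sleeping read; taking a mutex
	 * here (as the removed active_submits() did) can sleep, which
	 * is invalid inside a wait_event condition. */
	remaining = wait_event_timeout(*wq,
				       atomic_read(inflight) == 0,
				       msecs_to_jiffies(1000));

	/* wait_event_timeout() returns 0 on timeout with the condition
	 * still false, otherwise the jiffies remaining (at least 1). */
	return remaining ? 0 : -EBUSY;
}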
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -54,87 +54,87 @@ struct dpu_intr_reg {
  * When making changes be sure to sync with dpu_hw_intr_reg
  */
 static const struct dpu_intr_reg dpu_intr_set[] = {
-	{
+	[MDP_SSPP_TOP0_INTR] = {
 		MDP_SSPP_TOP0_OFF+INTR_CLEAR,
 		MDP_SSPP_TOP0_OFF+INTR_EN,
 		MDP_SSPP_TOP0_OFF+INTR_STATUS
 	},
-	{
+	[MDP_SSPP_TOP0_INTR2] = {
 		MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
 		MDP_SSPP_TOP0_OFF+INTR2_EN,
 		MDP_SSPP_TOP0_OFF+INTR2_STATUS
 	},
-	{
+	[MDP_SSPP_TOP0_HIST_INTR] = {
 		MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
 		MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
 		MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
 	},
-	{
+	[MDP_INTF0_INTR] = {
 		MDP_INTF_0_OFF+INTF_INTR_CLEAR,
 		MDP_INTF_0_OFF+INTF_INTR_EN,
 		MDP_INTF_0_OFF+INTF_INTR_STATUS
 	},
-	{
+	[MDP_INTF1_INTR] = {
 		MDP_INTF_1_OFF+INTF_INTR_CLEAR,
 		MDP_INTF_1_OFF+INTF_INTR_EN,
 		MDP_INTF_1_OFF+INTF_INTR_STATUS
 	},
-	{
+	[MDP_INTF2_INTR] = {
 		MDP_INTF_2_OFF+INTF_INTR_CLEAR,
 		MDP_INTF_2_OFF+INTF_INTR_EN,
 		MDP_INTF_2_OFF+INTF_INTR_STATUS
 	},
-	{
+	[MDP_INTF3_INTR] = {
 		MDP_INTF_3_OFF+INTF_INTR_CLEAR,
 		MDP_INTF_3_OFF+INTF_INTR_EN,
 		MDP_INTF_3_OFF+INTF_INTR_STATUS
 	},
-	{
+	[MDP_INTF4_INTR] = {
 		MDP_INTF_4_OFF+INTF_INTR_CLEAR,
 		MDP_INTF_4_OFF+INTF_INTR_EN,
 		MDP_INTF_4_OFF+INTF_INTR_STATUS
 	},
-	{
+	[MDP_INTF5_INTR] = {
 		MDP_INTF_5_OFF+INTF_INTR_CLEAR,
 		MDP_INTF_5_OFF+INTF_INTR_EN,
 		MDP_INTF_5_OFF+INTF_INTR_STATUS
 	},
-	{
+	[MDP_AD4_0_INTR] = {
 		MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
 		MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
 		MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
 	},
-	{
+	[MDP_AD4_1_INTR] = {
 		MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
 		MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
 		MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
 	},
-	{
+	[MDP_INTF0_7xxx_INTR] = {
 		MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR,
 		MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN,
 		MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS
 	},
-	{
+	[MDP_INTF1_7xxx_INTR] = {
 		MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR,
 		MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN,
 		MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
 	},
-	{
+	[MDP_INTF2_7xxx_INTR] = {
 		MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
 		MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
 		MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
 	},
-	{
+	[MDP_INTF3_7xxx_INTR] = {
 		MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
 		MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
 		MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS
 	},
-	{
+	[MDP_INTF4_7xxx_INTR] = {
 		MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR,
 		MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN,
 		MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS
 	},
-	{
+	[MDP_INTF5_7xxx_INTR] = {
 		MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
 		MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
 		MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
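The conversion above relies on C99 designated array initializers: each entry binds to its enum index, so the table stays correct even if the enum order and the order of entries in the file diverge. A self-contained illustration with hypothetical names:

#include <stdio.h>

enum irq_block { BLOCK_A, BLOCK_B, BLOCK_C, BLOCK_COUNT };

struct intr_reg {
	unsigned int clr_off;
	unsigned int en_off;
	unsigned int status_off;
};

static const struct intr_reg intr_set[BLOCK_COUNT] = {
	/* order in the file no longer has to match the enum; reordering
	 * or inserting an enum member cannot silently shift later rows */
	[BLOCK_C] = { 0x300, 0x304, 0x308 },
	[BLOCK_A] = { 0x100, 0x104, 0x108 },
	[BLOCK_B] = { 0x200, 0x204, 0x208 },
};

int main(void)
{
	printf("BLOCK_B status at %#x\n", intr_set[BLOCK_B].status_off);
	return 0;
}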
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -98,7 +98,10 @@ static void mdp5_plane_reset(struct drm_plane *plane)
 		__drm_atomic_helper_plane_destroy_state(plane->state);
 
 	kfree(to_mdp5_plane_state(plane->state));
+	plane->state = NULL;
 	mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+	if (!mdp5_state)
+		return;
 
 	__drm_atomic_helper_plane_reset(plane, &mdp5_state->base);
 }
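Two things happen in this small hunk: the freed plane->state pointer is cleared, and the kzalloc() result is checked. Clearing first matters because the new early return on allocation failure would otherwise leave plane->state dangling at freed memory. A generic sketch of the pattern, with hypothetical types and userspace allocators for brevity:

#include <stdlib.h>

struct state { int refcount; };
struct object { struct state *state; };

static void object_reset(struct object *obj)
{
	free(obj->state);
	obj->state = NULL;	/* never leave a dangling pointer behind */

	obj->state = calloc(1, sizeof(*obj->state));
	if (!obj->state)
		return;		/* caller sees NULL, not freed memory */
	obj->state->refcount = 1;
}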
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -176,6 +176,8 @@ void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
 	va_list va;
 
 	new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL);
+	if (!new_blk)
+		return;
 
 	va_start(va, fmt);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -580,6 +580,12 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
 		dp->dp_display.connector_type, state);
 	mutex_unlock(&dp->event_mutex);
 
+	/*
+	 * add fail safe mode outside event_mutex scope
+	 * to avoid potiential circular lock with drm thread
+	 */
+	dp_panel_add_fail_safe_mode(dp->dp_display.connector);
+
 	/* uevent will complete connection part */
 	return 0;
 };
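Moving dp_panel_add_fail_safe_mode() below the mutex_unlock() means this handler never holds event_mutex while the helper takes mode_config.mutex, so it can no longer form one half of a circular lock with a thread acquiring the same two locks in the opposite order. An abstract AB-BA illustration in pthreads — a hypothetical call graph, not the driver's actual paths:

#include <pthread.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mode_config_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Thread 1: takes A then B */
static void *hpd_handler(void *arg)
{
	pthread_mutex_lock(&event_mutex);
	pthread_mutex_lock(&mode_config_mutex);	/* deadlock-prone nesting */
	pthread_mutex_unlock(&mode_config_mutex);
	pthread_mutex_unlock(&event_mutex);
	return arg;
}

/* Thread 2: takes B then A -- the classic AB-BA deadlock partner */
static void *drm_thread(void *arg)
{
	pthread_mutex_lock(&mode_config_mutex);
	pthread_mutex_lock(&event_mutex);
	pthread_mutex_unlock(&event_mutex);
	pthread_mutex_unlock(&mode_config_mutex);
	return arg;
}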
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -151,6 +151,15 @@ static int dp_panel_update_modes(struct drm_connector *connector,
 	return rc;
 }
 
+void dp_panel_add_fail_safe_mode(struct drm_connector *connector)
+{
+	/* fail safe edid */
+	mutex_lock(&connector->dev->mode_config.mutex);
+	if (drm_add_modes_noedid(connector, 640, 480))
+		drm_set_preferred_mode(connector, 640, 480);
+	mutex_unlock(&connector->dev->mode_config.mutex);
+}
+
 int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
 		struct drm_connector *connector)
 {
@@ -207,16 +216,7 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
 			goto end;
 		}
 
-		/* fail safe edid */
-		mutex_lock(&connector->dev->mode_config.mutex);
-		if (drm_add_modes_noedid(connector, 640, 480))
-			drm_set_preferred_mode(connector, 640, 480);
-		mutex_unlock(&connector->dev->mode_config.mutex);
 	} else {
-		/* always add fail-safe mode as backup mode */
-		mutex_lock(&connector->dev->mode_config.mutex);
-		drm_add_modes_noedid(connector, 640, 480);
-		mutex_unlock(&connector->dev->mode_config.mutex);
+		dp_panel_add_fail_safe_mode(connector);
 	}
 
 	if (panel->aux_cfg_update_done) {
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -59,6 +59,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel);
 int dp_panel_deinit(struct dp_panel *dp_panel);
 int dp_panel_timing_cfg(struct dp_panel *dp_panel);
 void dp_panel_dump_regs(struct dp_panel *dp_panel);
+void dp_panel_add_fail_safe_mode(struct drm_connector *connector);
 int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
 		struct drm_connector *connector);
 u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -638,7 +638,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
 	return connector;
 
 fail:
-	connector->funcs->destroy(msm_dsi->connector);
+	connector->funcs->destroy(connector);
 	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
@@ -274,7 +274,7 @@ bool msm_use_mmu(struct drm_device *dev)
 	struct msm_drm_private *priv = dev->dev_private;
 
 	/* a2xx comes with its own MMU */
-	return priv->is_a2xx || iommu_present(&platform_bus_type);
+	return priv->is_a2xx || device_iommu_mapped(dev->dev);
 }
 
 static int msm_init_vram(struct drm_device *dev)
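This is the "iommu present removal" from the pull message: iommu_present() answers a bus-wide question ("is any IOMMU driver registered for platform devices?"), whereas device_iommu_mapped() asks whether this particular device is actually attached to an IOMMU group, which is what msm_use_mmu() really wants to know. For reference, in kernels of this era the helper is roughly the following one-line inline from include/linux/device.h:

/* Roughly the contemporary definition, shown for context */
static inline bool device_iommu_mapped(struct device *dev)
{
	/* iommu_group is set by the IOMMU core when the device is
	 * attached to an IOMMU, so this is per-device information */
	return (dev->iommu_group != NULL);
}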
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
@@ -926,6 +926,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
 				get_pid_task(aspace->pid, PIDTYPE_PID);
 		if (task) {
 			comm = kstrdup(task->comm, GFP_KERNEL);
+			put_task_struct(task);
 		} else {
 			comm = NULL;
 		}
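The one-line addition balances a reference: get_pid_task() returns the task_struct with its reference count elevated, and this debugfs path was leaking that reference on every call. A minimal sketch of the rule, using a hypothetical helper around the real kernel APIs:

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/slab.h>

static char *comm_of_pid(struct pid *pid)
{
	struct task_struct *task;
	char *comm = NULL;

	task = get_pid_task(pid, PIDTYPE_PID);	/* takes a task reference */
	if (task) {
		comm = kstrdup(task->comm, GFP_KERNEL);
		put_task_struct(task);		/* always drop it when done */
	}
	return comm;				/* caller kfree()s, may be NULL */
}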