Merge branch 'msm-next' of git://people.freedesktop.org/~robclark/linux into drm-next

Noteworthy changes this time:

1) 4k support for newer chips (ganging up hwpipes and mixers)
2) Using OPP bindings for gpu
3) More prep work towards per-process pagetables

* 'msm-next' of git://people.freedesktop.org/~robclark/linux: (47 commits)
  msm/drm: gpu: Dynamically locate the clocks from the device tree
  drm/msm: gpu: Use OPP tables if we can
  drm/msm: Hard code the GPU "slow frequency"
  drm/msm: Add MSM_PARAM_GMEM_BASE
  drm/msm: Reference count address spaces
  drm/msm: Make sure to detach the MMU during GPU cleanup
  drm/msm/mdp5: Enable 3D mux in mdp5_ctl
  drm/msm/mdp5: Reset CTL blend registers before configuring them
  drm/msm/mdp5: Assign 'right' mixer to CRTC state
  drm/msm/mdp5: Stage border out on base stage if CRTC has 2 LMs
  drm/msm/mdp5: Stage right side hwpipes on Right-side Layer Mixer
  drm/msm/mdp5: Prepare Layer Mixers for source split
  drm/msm/mdp5: Configure 'right' hwpipe
  drm/msm/mdp5: Assign a 'right hwpipe' to plane state
  drm/msm/mdp5: Create mdp5_hwpipe_mode_set
  drm/msm/mdp5: Add optional 'right' Layer Mixer in CRTC state
  drm/msm/mdp5: Add a CAP for Source Split
  drm/msm/mdp5: Remove mixer/intf pointers from mdp5_ctl
  drm/msm/mdp5: Start using parameters from CRTC state
  drm/msm/mdp5: Add more stuff to CRTC state
  ...
commit d455937ed1
@@ -40,6 +40,7 @@ msm-y := \
 	mdp/mdp5/mdp5_mdss.o \
 	mdp/mdp5/mdp5_kms.o \
 	mdp/mdp5/mdp5_pipe.o \
+	mdp/mdp5/mdp5_mixer.o \
 	mdp/mdp5/mdp5_plane.o \
 	mdp/mdp5/mdp5_smp.o \
 	msm_atomic.o \
@@ -412,10 +412,8 @@ static const unsigned int a3xx_registers[] = {
 #ifdef CONFIG_DEBUG_FS
 static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
 {
-	gpu->funcs->pm_resume(gpu);
 	seq_printf(m, "status: %08x\n",
 			gpu_read(gpu, REG_A3XX_RBBM_STATUS));
-	gpu->funcs->pm_suspend(gpu);
 	adreno_show(gpu, m);
 }
 #endif
@@ -456,12 +456,8 @@ static const unsigned int a4xx_registers[] = {
 #ifdef CONFIG_DEBUG_FS
 static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
 {
-	gpu->funcs->pm_resume(gpu);
-
 	seq_printf(m, "status: %08x\n",
 			gpu_read(gpu, REG_A4XX_RBBM_STATUS));
-	gpu->funcs->pm_suspend(gpu);
-
 	adreno_show(gpu, m);

 }
@@ -638,10 +638,8 @@ static void a5xx_cp_err_irq(struct msm_gpu *gpu)
 	}
 }

-static void a5xx_rbbm_err_irq(struct msm_gpu *gpu)
+static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
 {
-	u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
-
 	if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
 		u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);

@@ -653,6 +651,10 @@ static void a5xx_rbbm_err_irq(struct msm_gpu *gpu)

 		/* Clear the error */
 		gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
+
+		/* Clear the interrupt */
+		gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+			A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
 	}

 	if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
@@ -704,10 +706,16 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
 {
 	u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);

-	gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, status);
+	/*
+	 * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it
+	 * before the source is cleared the interrupt will storm.
+	 */
+	gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+		status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);

+	/* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
 	if (status & RBBM_ERROR_MASK)
-		a5xx_rbbm_err_irq(gpu);
+		a5xx_rbbm_err_irq(gpu, status);

 	if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
 		a5xx_cp_err_irq(gpu);
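The a5xx_irq() hunk above avoids an interrupt storm: RBBM_AHB_ERROR stays latched by its error source, so acking it in INT_CLEAR_CMD before REG_A5XX_RBBM_AHB_CMD has cleared the source would immediately re-assert it. A minimal sketch of the general pattern, where irq_status()/irq_ack()/clear_error_source() are hypothetical helpers standing in for the register accessors:

    #include <linux/interrupt.h>

    #define ERROR_BIT BIT(0) /* stand-in for a source-latched error irq */

    static irqreturn_t gpu_irq_sketch(int irq, void *data)
    {
            u32 status = irq_status(data);

            /* ack everything that is safe to ack right away ... */
            irq_ack(data, status & ~ERROR_BIT);

            if (status & ERROR_BIT) {
                    /* ... but quiesce the error source first ... */
                    clear_error_source(data);
                    /* ... and only then ack the latched error bit */
                    irq_ack(data, ERROR_BIT);
            }

            return IRQ_HANDLED;
    }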
@@ -837,12 +845,8 @@ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 #ifdef CONFIG_DEBUG_FS
 static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
 {
-	gpu->funcs->pm_resume(gpu);
-
 	seq_printf(m, "status: %08x\n",
 			gpu_read(gpu, REG_A5XX_RBBM_STATUS));
-	gpu->funcs->pm_suspend(gpu);
-
 	adreno_show(gpu, m);
 }
 #endif
@@ -2,7 +2,7 @@
  * Copyright (C) 2013-2014 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
@@ -17,6 +17,7 @@
  * this program. If not, see <http://www.gnu.org/licenses/>.
  */

+#include <linux/pm_opp.h>
 #include "adreno_gpu.h"

 #define ANY_ID 0xff
@@ -155,21 +156,14 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)

 	if (gpu) {
 		int ret;
-		mutex_lock(&dev->struct_mutex);
-		gpu->funcs->pm_resume(gpu);
-		mutex_unlock(&dev->struct_mutex);
-
-		disable_irq(gpu->irq);
-
-		ret = gpu->funcs->hw_init(gpu);
+
+		pm_runtime_get_sync(&pdev->dev);
+		ret = msm_gpu_hw_init(gpu);
+		pm_runtime_put_sync(&pdev->dev);
 		if (ret) {
 			dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
 			gpu->funcs->destroy(gpu);
 			gpu = NULL;
 		} else {
-			enable_irq(gpu->irq);
 			/* give inactive pm a chance to kick in: */
 			msm_gpu_retire(gpu);
 		}
 	}

@@ -220,10 +214,71 @@ static int find_chipid(struct device *dev, u32 *chipid)
 	return 0;
 }

+/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
+static int adreno_get_legacy_pwrlevels(struct device *dev)
+{
+	struct device_node *child, *node;
+	int ret;
+
+	node = of_find_compatible_node(dev->of_node, NULL,
+		"qcom,gpu-pwrlevels");
+	if (!node) {
+		dev_err(dev, "Could not find the GPU powerlevels\n");
+		return -ENXIO;
+	}
+
+	for_each_child_of_node(node, child) {
+		unsigned int val;
+
+		ret = of_property_read_u32(child, "qcom,gpu-freq", &val);
+		if (ret)
+			continue;
+
+		/*
+		 * Skip the intentionally bogus clock value found at the bottom
+		 * of most legacy frequency tables
+		 */
+		if (val != 27000000)
+			dev_pm_opp_add(dev, val, 0);
+	}
+
+	return 0;
+}
+
+static int adreno_get_pwrlevels(struct device *dev,
+		struct adreno_platform_config *config)
+{
+	unsigned long freq = ULONG_MAX;
+	struct dev_pm_opp *opp;
+	int ret;
+
+	/* You down with OPP? */
+	if (!of_find_property(dev->of_node, "operating-points-v2", NULL))
+		ret = adreno_get_legacy_pwrlevels(dev);
+	else
+		ret = dev_pm_opp_of_add_table(dev);
+
+	if (ret)
+		return ret;
+
+	/* Find the fastest defined rate */
+	opp = dev_pm_opp_find_freq_floor(dev, &freq);
+	if (!IS_ERR(opp))
+		config->fast_rate = dev_pm_opp_get_freq(opp);
+
+	if (!config->fast_rate) {
+		DRM_DEV_INFO(dev,
+			"Could not find clock rate. Using default\n");
+		/* Pick a suitably safe clock speed for any target */
+		config->fast_rate = 200000000;
+	}
+
+	return 0;
+}
+
 static int adreno_bind(struct device *dev, struct device *master, void *data)
 {
 	static struct adreno_platform_config config = {};
 	struct device_node *child, *node = dev->of_node;
 	u32 val;
 	int ret;

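adreno_get_pwrlevels() above leans on a detail of the OPP API that is easy to miss: dev_pm_opp_find_freq_floor() returns the highest OPP at or below *freq and writes the matched rate back through the pointer, so seeding freq with ULONG_MAX yields the fastest entry in the table. A sketch of that lookup in isolation (gpu_max_freq() is a made-up name, not part of this series):

    #include <linux/pm_opp.h>

    /* Sketch: find the fastest rate the OPP table defines for @dev. */
    static unsigned long gpu_max_freq(struct device *dev)
    {
            unsigned long freq = ULONG_MAX;
            struct dev_pm_opp *opp;

            /* a floor search from "infinity" lands on the highest OPP */
            opp = dev_pm_opp_find_freq_floor(dev, &freq);
            if (IS_ERR(opp))
                    return 0;       /* empty or missing table */

            return dev_pm_opp_get_freq(opp);
    }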
@@ -238,28 +293,10 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)

-	/* find clock rates: */
-	config.fast_rate = 0;
-	config.slow_rate = ~0;
-	for_each_child_of_node(node, child) {
-		if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
-			struct device_node *pwrlvl;
-			for_each_child_of_node(child, pwrlvl) {
-				ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
-				if (ret) {
-					dev_err(dev, "could not find gpu-freq: %d\n", ret);
-					return ret;
-				}
-				config.fast_rate = max(config.fast_rate, val);
-				config.slow_rate = min(config.slow_rate, val);
-			}
-		}
-	}
-
-	if (!config.fast_rate) {
-		dev_warn(dev, "could not find clk rates\n");
-		/* This is a safe low speed for all devices: */
-		config.fast_rate = 200000000;
-		config.slow_rate = 27000000;
-	}
+	ret = adreno_get_pwrlevels(dev, &config);
+	if (ret)
+		return ret;

 	dev->platform_data = &config;
 	set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
@@ -296,12 +333,35 @@ static const struct of_device_id dt_match[] = {
 	{}
 };

+#ifdef CONFIG_PM
+static int adreno_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_gpu *gpu = platform_get_drvdata(pdev);
+
+	return gpu->funcs->pm_resume(gpu);
+}
+
+static int adreno_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_gpu *gpu = platform_get_drvdata(pdev);
+
+	return gpu->funcs->pm_suspend(gpu);
+}
+#endif
+
+static const struct dev_pm_ops adreno_pm_ops = {
+	SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
+};
+
 static struct platform_driver adreno_driver = {
 	.probe = adreno_probe,
 	.remove = adreno_remove,
 	.driver = {
 		.name = "adreno",
 		.of_match_table = dt_match,
+		.pm = &adreno_pm_ops,
 	},
 };
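With SET_RUNTIME_PM_OPS() wired to the GPU's own pm_suspend/pm_resume funcs, the rest of the driver can bracket hardware access with runtime-PM get/put instead of toggling power by hand (the adreno_load_gpu() hunk earlier does exactly this around msm_gpu_hw_init()). A sketch of the usual bracketing, assuming autosuspend was enabled as in adreno_gpu_init(); foo_hw_touch() is hypothetical:

    #include <linux/pm_runtime.h>

    /* Sketch: typical runtime-PM bracketing around register access. */
    static int foo_hw_touch(struct device *dev)
    {
            int ret;

            ret = pm_runtime_get_sync(dev); /* resumes the GPU if needed */
            if (ret < 0) {
                    pm_runtime_put_noidle(dev);
                    return ret;
            }

            /* ... touch the hardware here ... */

            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev); /* suspend after the delay */
            return 0;
    }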
@@ -35,6 +35,9 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
 	case MSM_PARAM_GMEM_SIZE:
 		*value = adreno_gpu->gmem;
 		return 0;
+	case MSM_PARAM_GMEM_BASE:
+		*value = 0x100000;
+		return 0;
 	case MSM_PARAM_CHIP_ID:
 		*value = adreno_gpu->rev.patchid |
 				(adreno_gpu->rev.minor << 8) |
@@ -68,6 +71,14 @@ int adreno_hw_init(struct msm_gpu *gpu)
 		return ret;
 	}

+	/* reset ringbuffer: */
+	gpu->rb->cur = gpu->rb->start;
+
+	/* reset completed fence seqno: */
+	adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
+	adreno_gpu->memptrs->rptr = 0;
+	adreno_gpu->memptrs->wptr = 0;
+
 	/* Setup REG_CP_RB_CNTL: */
 	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
 			/* size is log2(quad-words): */
@@ -111,29 +122,20 @@ uint32_t adreno_last_fence(struct msm_gpu *gpu)

 void adreno_recover(struct msm_gpu *gpu)
 {
-	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct drm_device *dev = gpu->dev;
 	int ret;

+	// XXX pm-runtime?? we *need* the device to be off after this
+	// so maybe continuing to call ->pm_suspend/resume() is better?
+
 	gpu->funcs->pm_suspend(gpu);
-
-	/* reset ringbuffer: */
-	gpu->rb->cur = gpu->rb->start;
-
-	/* reset completed fence seqno: */
-	adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
-	adreno_gpu->memptrs->rptr = 0;
-	adreno_gpu->memptrs->wptr = 0;
-
 	gpu->funcs->pm_resume(gpu);
+
 	disable_irq(gpu->irq);
-	ret = gpu->funcs->hw_init(gpu);
+	ret = msm_gpu_hw_init(gpu);
 	if (ret) {
 		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
 		/* hmm, oh well? */
 	}
 	enable_irq(gpu->irq);
 }
@@ -259,8 +261,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
 	seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
 	seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));

-	gpu->funcs->pm_resume(gpu);
-
 	/* dump these out in a form that can be parsed by demsm: */
 	seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
 	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
@@ -273,8 +273,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
 			seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
 		}
 	}

-	gpu->funcs->pm_suspend(gpu);
 }
 #endif
@@ -354,14 +352,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	adreno_gpu->rev = config->rev;

 	gpu->fast_rate = config->fast_rate;
-	gpu->slow_rate = config->slow_rate;
 	gpu->bus_freq = config->bus_freq;
 #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 	gpu->bus_scale_table = config->bus_scale_table;
 #endif

-	DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
-			gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
+	DBG("fast_rate=%u, slow_rate=27000000, bus_freq=%u",
+			gpu->fast_rate, gpu->bus_freq);

 	ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
 		adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
@@ -369,6 +366,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	if (ret)
 		return ret;

+	pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
 	ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
 	if (ret) {
 		dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
@@ -439,6 +440,6 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
 	if (gpu->aspace) {
 		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
 			iommu_ports, ARRAY_SIZE(iommu_ports));
-		msm_gem_address_space_destroy(gpu->aspace);
+		msm_gem_address_space_put(gpu->aspace);
 	}
 }
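The rename from msm_gem_address_space_destroy() to msm_gem_address_space_put() comes from the "Reference count address spaces" commit in this pull: an address space may now be shared (groundwork for per-process pagetables), so teardown drops a reference rather than freeing outright. A sketch of the kref shape such an API typically takes (the foo_ names and fields are assumptions, not copied from the series):

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct foo_address_space {
            struct kref kref;
            /* ... mmu pointer, name, VA range ... */
    };

    static void foo_aspace_release(struct kref *kref)
    {
            struct foo_address_space *aspace =
                    container_of(kref, struct foo_address_space, kref);

            kfree(aspace);  /* last user gone: really tear it down */
    }

    static void foo_aspace_put(struct foo_address_space *aspace)
    {
            if (aspace)
                    kref_put(&aspace->kref, foo_aspace_release);
    }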
@@ -123,7 +123,7 @@ struct adreno_gpu {
 /* platform config data (ie. from DT, or pdata) */
 struct adreno_platform_config {
 	struct adreno_rev rev;
-	uint32_t fast_rate, slow_rate, bus_freq;
+	uint32_t fast_rate, bus_freq;
 #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 	struct msm_bus_scale_pdata *bus_scale_table;
 #endif
@@ -114,15 +114,9 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
 	spin_lock_irqsave(&dev->event_lock, flags);
 	event = mdp4_crtc->event;
 	if (event) {
-		/* if regular vblank case (!file) or if cancel-flip from
-		 * preclose on file that requested flip, then send the
-		 * event:
-		 */
-		if (!file || (event->base.file_priv == file)) {
-			mdp4_crtc->event = NULL;
-			DBG("%s: send event: %p", mdp4_crtc->name, event);
-			drm_crtc_send_vblank_event(crtc, event);
-		}
+		mdp4_crtc->event = NULL;
+		DBG("%s: send event: %p", mdp4_crtc->name, event);
+		drm_crtc_send_vblank_event(crtc, event);
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
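The simplified complete_flip() no longer filters on event->base.file_priv; this appears to rely on the DRM core now cleaning up pending events for a closing file itself, leaving the driver to just hand back whatever event is pending under dev->event_lock. The surviving pattern, sketched on its own (send_pending_event() is an illustrative name):

    /* Sketch: hand a pending pageflip event back to the DRM core. */
    static void send_pending_event(struct drm_crtc *crtc,
                                   struct drm_pending_vblank_event **slot)
    {
            struct drm_device *dev = crtc->dev;
            unsigned long flags;

            /* dev->event_lock protects the pending-event slot */
            spin_lock_irqsave(&dev->event_lock, flags);
            if (*slot) {
                    drm_crtc_send_vblank_event(crtc, *slot);
                    *slot = NULL;
            }
            spin_unlock_irqrestore(&dev->event_lock, flags);
    }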
@@ -169,7 +169,7 @@ static void mdp4_destroy(struct msm_kms *kms)
 	if (aspace) {
 		aspace->mmu->funcs->detach(aspace->mmu,
 				iommu_ports, ARRAY_SIZE(iommu_ports));
-		msm_gem_address_space_destroy(aspace);
+		msm_gem_address_space_put(aspace);
 	}

 	if (mdp4_kms->rpm_enabled)
@@ -70,6 +70,18 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
 	.lm = {
 		.count = 5,
 		.base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
+		.instances = {
+				{ .id = 0, .pp = 0, .dspp = 0,
+					.caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 1, .pp = 1, .dspp = 1,
+					.caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 2, .pp = 2, .dspp = 2,
+					.caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 3, .pp = -1, .dspp = -1,
+					.caps = MDP_LM_CAP_WB },
+				{ .id = 4, .pp = -1, .dspp = -1,
+					.caps = MDP_LM_CAP_WB },
+			     },
 		.nb_stages = 5,
 	},
 	.dspp = {
@@ -134,6 +146,18 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
 	.lm = {
 		.count = 5,
 		.base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
+		.instances = {
+				{ .id = 0, .pp = 0, .dspp = 0,
+					.caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 1, .pp = 1, .dspp = 1,
+					.caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 2, .pp = 2, .dspp = 2,
+					.caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 3, .pp = -1, .dspp = -1,
+					.caps = MDP_LM_CAP_WB, },
+				{ .id = 4, .pp = -1, .dspp = -1,
+					.caps = MDP_LM_CAP_WB, },
+			     },
 		.nb_stages = 5,
 		.max_width = 2048,
 		.max_height = 0xFFFF,
@@ -167,6 +191,7 @@ const struct mdp5_cfg_hw apq8084_config = {
 	.mdp = {
 		.count = 1,
 		.caps = MDP_CAP_SMP |
+			MDP_CAP_SRC_SPLIT |
 			0,
 	},
 	.smp = {
@@ -211,6 +236,22 @@ const struct mdp5_cfg_hw apq8084_config = {
 	.lm = {
 		.count = 6,
 		.base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 },
+		.instances = {
+				{ .id = 0, .pp = 0, .dspp = 0,
+					.caps = MDP_LM_CAP_DISPLAY |
+						MDP_LM_CAP_PAIR, },
+				{ .id = 1, .pp = 1, .dspp = 1,
+					.caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 2, .pp = 2, .dspp = 2,
+					.caps = MDP_LM_CAP_DISPLAY |
+						MDP_LM_CAP_PAIR, },
+				{ .id = 3, .pp = -1, .dspp = -1,
+					.caps = MDP_LM_CAP_WB, },
+				{ .id = 4, .pp = -1, .dspp = -1,
+					.caps = MDP_LM_CAP_WB, },
+				{ .id = 5, .pp = 3, .dspp = 3,
+					.caps = MDP_LM_CAP_DISPLAY, },
+			     },
 		.nb_stages = 5,
 		.max_width = 2048,
 		.max_height = 0xFFFF,
@@ -282,6 +323,12 @@ const struct mdp5_cfg_hw msm8x16_config = {
 	.lm = {
 		.count = 2, /* LM0 and LM3 */
 		.base = { 0x44000, 0x47000 },
+		.instances = {
+				{ .id = 0, .pp = 0, .dspp = 0,
+					.caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 3, .pp = -1, .dspp = -1,
+					.caps = MDP_LM_CAP_WB },
+			     },
 		.nb_stages = 8,
 		.max_width = 2048,
 		.max_height = 0xFFFF,
@@ -306,6 +353,7 @@ const struct mdp5_cfg_hw msm8x94_config = {
 	.mdp = {
 		.count = 1,
 		.caps = MDP_CAP_SMP |
+			MDP_CAP_SRC_SPLIT |
 			0,
 	},
 	.smp = {
@@ -350,6 +398,22 @@ const struct mdp5_cfg_hw msm8x94_config = {
 	.lm = {
 		.count = 6,
 		.base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
+		.instances = {
+				{ .id = 0, .pp = 0, .dspp = 0,
+					.caps = MDP_LM_CAP_DISPLAY |
+						MDP_LM_CAP_PAIR, },
+				{ .id = 1, .pp = 1, .dspp = 1,
+					.caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 2, .pp = 2, .dspp = 2,
+					.caps = MDP_LM_CAP_DISPLAY |
+						MDP_LM_CAP_PAIR, },
+				{ .id = 3, .pp = -1, .dspp = -1,
+					.caps = MDP_LM_CAP_WB, },
+				{ .id = 4, .pp = -1, .dspp = -1,
+					.caps = MDP_LM_CAP_WB, },
+				{ .id = 5, .pp = 3, .dspp = 3,
+					.caps = MDP_LM_CAP_DISPLAY, },
+			     },
 		.nb_stages = 8,
 		.max_width = 2048,
 		.max_height = 0xFFFF,
@@ -385,6 +449,7 @@ const struct mdp5_cfg_hw msm8x96_config = {
 		.count = 1,
 		.caps = MDP_CAP_DSC |
 			MDP_CAP_CDM |
+			MDP_CAP_SRC_SPLIT |
 			0,
 	},
 	.ctl = {
@@ -434,6 +499,22 @@ const struct mdp5_cfg_hw msm8x96_config = {
 	.lm = {
 		.count = 6,
 		.base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
+		.instances = {
+				{ .id = 0, .pp = 0, .dspp = 0,
+					.caps = MDP_LM_CAP_DISPLAY |
+						MDP_LM_CAP_PAIR, },
+				{ .id = 1, .pp = 1, .dspp = 1,
+					.caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 2, .pp = 2, .dspp = -1,
+					.caps = MDP_LM_CAP_DISPLAY |
+						MDP_LM_CAP_PAIR, },
+				{ .id = 3, .pp = -1, .dspp = -1,
+					.caps = MDP_LM_CAP_WB, },
+				{ .id = 4, .pp = -1, .dspp = -1,
+					.caps = MDP_LM_CAP_WB, },
+				{ .id = 5, .pp = 3, .dspp = -1,
+					.caps = MDP_LM_CAP_DISPLAY, },
+			     },
 		.nb_stages = 8,
 		.max_width = 2560,
 		.max_height = 0xFFFF,
@@ -39,8 +39,16 @@ struct mdp5_sub_block {
 	MDP5_SUB_BLOCK_DEFINITION;
 };

+struct mdp5_lm_instance {
+	int id;
+	int pp;
+	int dspp;
+	uint32_t caps;
+};
+
 struct mdp5_lm_block {
 	MDP5_SUB_BLOCK_DEFINITION;
+	struct mdp5_lm_instance instances[MAX_BASES];
 	uint32_t nb_stages;	/* number of stages per blender */
 	uint32_t max_width;	/* Maximum output resolution */
 	uint32_t max_height;
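The new mdp5_lm_instance tables let each SoC config describe, per Layer Mixer, the PingPong and DSPP blocks wired to it (-1 when absent) and a caps bitmask; MDP_LM_CAP_PAIR marks mixers that can gang with a neighbour for source split, MDP_LM_CAP_WB marks writeback-only mixers. A sketch of the kind of caps-driven lookup these tables enable (foo_pick_mixer() is illustrative, simplified from what a real assign helper would do):

    /* Sketch: pick the first free mixer that has every requested cap. */
    static int foo_pick_mixer(const struct mdp5_lm_instance *lm, int count,
                              uint32_t caps, unsigned long *used)
    {
            int i;

            for (i = 0; i < count; i++) {
                    if ((lm[i].caps & caps) != caps)
                            continue;       /* missing a capability */
                    if (test_and_set_bit(lm[i].id, used))
                            continue;       /* already taken */
                    return lm[i].id;
            }

            return -ENOSPC;
    }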
@@ -51,7 +51,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
 	struct device *dev = encoder->dev->dev;
 	u32 total_lines_x100, vclks_line, cfg;
 	long vsync_clk_speed;
-	int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+	struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+	int pp_id = mixer->pp;

 	if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
 		dev_err(dev, "vsync_clk is not initialized\n");
@@ -94,7 +95,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
 static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
-	int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+	struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+	int pp_id = mixer->pp;
 	int ret;

 	ret = clk_set_rate(mdp5_kms->vsync_clk,
@@ -119,7 +121,8 @@ static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
 static void pingpong_tearcheck_disable(struct drm_encoder *encoder)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
-	int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+	struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+	int pp_id = mixer->pp;

 	mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0);
 	clk_disable_unprepare(mdp5_kms->vsync_clk);
@@ -129,8 +132,6 @@ void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
 		struct drm_display_mode *mode,
 		struct drm_display_mode *adjusted_mode)
 {
-	struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
-
 	mode = adjusted_mode;

 	DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
@@ -142,23 +143,23 @@ void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
 			mode->vsync_end, mode->vtotal,
 			mode->type, mode->flags);
 	pingpong_tearcheck_setup(encoder, mode);
-	mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_cmd_enc->intf,
-				mdp5_cmd_enc->ctl);
+	mdp5_crtc_set_pipeline(encoder->crtc);
 }

 void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
 {
 	struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
 	struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
-	struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
+	struct mdp5_interface *intf = mdp5_cmd_enc->intf;
+	struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);

 	if (WARN_ON(!mdp5_cmd_enc->enabled))
 		return;

 	pingpong_tearcheck_disable(encoder);

-	mdp5_ctl_set_encoder_state(ctl, false);
-	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+	mdp5_ctl_set_encoder_state(ctl, pipeline, false);
+	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));

 	bs_set(mdp5_cmd_enc, 0);

@@ -169,7 +170,8 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
 {
 	struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
 	struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
-	struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
+	struct mdp5_interface *intf = mdp5_cmd_enc->intf;
+	struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);

 	if (WARN_ON(mdp5_cmd_enc->enabled))
 		return;
@@ -178,9 +180,9 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
 	if (pingpong_tearcheck_enable(encoder))
 		return;

-	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));

-	mdp5_ctl_set_encoder_state(ctl, true);
+	mdp5_ctl_set_encoder_state(ctl, pipeline, true);

 	mdp5_cmd_enc->enabled = true;
 }
@@ -197,7 +199,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
 		return -EINVAL;

 	mdp5_kms = get_kms(encoder);
-	intf_num = mdp5_cmd_enc->intf.num;
+	intf_num = mdp5_cmd_enc->intf->num;

 	/* Switch slave encoder's trigger MUX, to use the master's
 	 * start signal for the slave encoder
@@ -32,13 +32,7 @@ struct mdp5_crtc {
 	int id;
 	bool enabled;

-	/* layer mixer used for this CRTC (+ its lock): */
-#define GET_LM_ID(crtc_id)	((crtc_id == 3) ? 5 : crtc_id)
-	int lm;
-	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */
-
-	/* CTL used for this CRTC: */
-	struct mdp5_ctl *ctl;
+	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */

 	/* if there is a pending flip, these will be non-null: */
 	struct drm_pending_vblank_event *event;
@@ -61,8 +55,6 @@ struct mdp5_crtc {

 	struct completion pp_completion;

-	bool cmd_mode;
-
 	struct {
 		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
 		spinlock_t lock;
@@ -97,10 +89,12 @@ static void request_pp_done_pending(struct drm_crtc *crtc)

 static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
 {
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
+	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;

 	DBG("%s: flush=%08x", crtc->name, flush_mask);
-	return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
+	return mdp5_ctl_commit(ctl, pipeline, flush_mask);
 }

 /*
@@ -110,19 +104,25 @@ static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
  */
 static u32 crtc_flush_all(struct drm_crtc *crtc)
 {
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	struct mdp5_hw_mixer *mixer, *r_mixer;
 	struct drm_plane *plane;
 	uint32_t flush_mask = 0;

 	/* this should not happen: */
-	if (WARN_ON(!mdp5_crtc->ctl))
+	if (WARN_ON(!mdp5_cstate->ctl))
 		return 0;

 	drm_atomic_crtc_for_each_plane(plane, crtc) {
 		flush_mask |= mdp5_plane_get_flush(plane);
 	}

-	flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);
+	mixer = mdp5_cstate->pipeline.mixer;
+	flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);
+
+	r_mixer = mdp5_cstate->pipeline.r_mixer;
+	if (r_mixer)
+		flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);

 	return crtc_flush(crtc, flush_mask);
 }
@@ -130,7 +130,10 @@ static u32 crtc_flush_all(struct drm_crtc *crtc)
 /* if file!=NULL, this is preclose potential cancel-flip path */
 static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
 {
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
 	struct drm_device *dev = crtc->dev;
 	struct drm_pending_vblank_event *event;
 	unsigned long flags;
@@ -138,22 +141,17 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
 	spin_lock_irqsave(&dev->event_lock, flags);
 	event = mdp5_crtc->event;
 	if (event) {
-		/* if regular vblank case (!file) or if cancel-flip from
-		 * preclose on file that requested flip, then send the
-		 * event:
-		 */
-		if (!file || (event->base.file_priv == file)) {
-			mdp5_crtc->event = NULL;
-			DBG("%s: send event: %p", crtc->name, event);
-			drm_crtc_send_vblank_event(crtc, event);
-		}
+		mdp5_crtc->event = NULL;
+		DBG("%s: send event: %p", crtc->name, event);
+		drm_crtc_send_vblank_event(crtc, event);
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);

-	if (mdp5_crtc->ctl && !crtc->state->enable) {
+	if (ctl && !crtc->state->enable) {
 		/* set STAGE_UNUSED for all layers */
-		mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0);
-		mdp5_crtc->ctl = NULL;
+		mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
+		/* XXX: What to do here? */
+		/* mdp5_crtc->ctl = NULL; */
 	}
 }
@@ -192,6 +190,12 @@ static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
 	}
 }

+/*
+ * left/right pipe offsets for the stage array used in blend_setup()
+ */
+#define PIPE_LEFT	0
+#define PIPE_RIGHT	1
+
 /*
  * blend_setup() - blend all the planes of a CRTC
  *
@@ -202,18 +206,26 @@ static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
 static void blend_setup(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct drm_plane *plane;
 	const struct mdp5_cfg_hw *hw_cfg;
 	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
 	const struct mdp_format *format;
-	uint32_t lm = mdp5_crtc->lm;
+	struct mdp5_hw_mixer *mixer = pipeline->mixer;
+	uint32_t lm = mixer->lm;
+	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
+	uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
+	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
 	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
 	unsigned long flags;
-	enum mdp5_pipe stage[STAGE_MAX + 1] = { SSPP_NONE };
+	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
+	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
 	int i, plane_cnt = 0;
 	bool bg_alpha_enabled = false;
 	u32 mixer_op_mode = 0;
+	u32 val;
 #define blender(stage)	((stage) - STAGE0)

 	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
@@ -221,14 +233,35 @@ static void blend_setup(struct drm_crtc *crtc)
 	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

 	/* ctl could be released already when we are shutting down: */
-	if (!mdp5_crtc->ctl)
+	/* XXX: Can this happen now? */
+	if (!ctl)
 		goto out;

 	/* Collect all plane information */
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		enum mdp5_pipe right_pipe;
+
 		pstate = to_mdp5_plane_state(plane->state);
 		pstates[pstate->stage] = pstate;
-		stage[pstate->stage] = mdp5_plane_pipe(plane);
+		stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
+
+		/*
+		 * if we have a right mixer, stage the same pipe as we
+		 * have on the left mixer
+		 */
+		if (r_mixer)
+			r_stage[pstate->stage][PIPE_LEFT] =
+				mdp5_plane_pipe(plane);
+		/*
+		 * if we have a right pipe (i.e, the plane comprises of 2
+		 * hwpipes, then stage the right pipe on the right side of both
+		 * the layer mixers
+		 */
+		right_pipe = mdp5_plane_right_pipe(plane);
+		if (right_pipe) {
+			stage[pstate->stage][PIPE_RIGHT] = right_pipe;
+			r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
+		}
+
 		plane_cnt++;
 	}

@@ -294,12 +327,27 @@ static void blend_setup(struct drm_crtc *crtc)
 				blender(i)), fg_alpha);
 		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
 				blender(i)), bg_alpha);
+		if (r_mixer) {
+			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
+					blender(i)), blend_op);
+			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
+					blender(i)), fg_alpha);
+			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
+					blender(i)), bg_alpha);
+		}
 	}

-	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), mixer_op_mode);
+	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
+	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
+		   val | mixer_op_mode);
+	if (r_mixer) {
+		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
+		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
+			   val | mixer_op_mode);
+	}

-	mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags);
+	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
+		       ctl_blend_flags);
 out:
 	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
 }
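With source split the stage array gains a second dimension: each blend stage now has a left and a right slot (PIPE_LEFT/PIPE_RIGHT), and a plane built from two hwpipes lands its right pipe in the right slot of both the left- and right-mixer arrays. A reduced sketch of that layout, with enum mdp5_pipe collapsed to int and 0 standing in for SSPP_NONE:

    #define MAX_PIPE_STAGE 2 /* PIPE_LEFT and PIPE_RIGHT */

    /* Sketch: stage one plane that may be made of two hwpipes. */
    static void stage_plane(int stage[][MAX_PIPE_STAGE], int s,
                            int left_pipe, int right_pipe /* 0 if none */)
    {
            stage[s][0] = left_pipe;          /* PIPE_LEFT */
            if (right_pipe)
                    stage[s][1] = right_pipe; /* PIPE_RIGHT */
    }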
@@ -307,7 +355,12 @@ out:
 static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
+	struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
+	struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
+	uint32_t lm = mixer->lm;
+	u32 mixer_width, val;
 	unsigned long flags;
 	struct drm_display_mode *mode;

@@ -325,16 +378,40 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
 			mode->vsync_end, mode->vtotal,
 			mode->type, mode->flags);

+	mixer_width = mode->hdisplay;
+	if (r_mixer)
+		mixer_width /= 2;
+
 	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
-	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
-			MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
+	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
+			MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
 			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+
+	/* Assign mixer to LEFT side in source split mode */
+	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
+	val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
+	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);
+
+	if (r_mixer) {
+		u32 r_lm = r_mixer->lm;
+
+		mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
+				MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
+				MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+
+		/* Assign mixer to RIGHT side in source split mode */
+		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
+		val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
+		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
+	}
+
 	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
 }

 static void mdp5_crtc_disable(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);

 	DBG("%s", crtc->name);
@@ -342,7 +419,7 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc)
 	if (WARN_ON(!mdp5_crtc->enabled))
 		return;

-	if (mdp5_crtc->cmd_mode)
+	if (mdp5_cstate->cmd_mode)
 		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

 	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
@@ -354,6 +431,7 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc)
 static void mdp5_crtc_enable(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);

 	DBG("%s", crtc->name);
@@ -364,12 +442,73 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc)
 	mdp5_enable(mdp5_kms);
 	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

-	if (mdp5_crtc->cmd_mode)
+	if (mdp5_cstate->cmd_mode)
 		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

 	mdp5_crtc->enabled = true;
 }

+int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
+			     struct drm_crtc_state *new_crtc_state,
+			     bool need_right_mixer)
+{
+	struct mdp5_crtc_state *mdp5_cstate =
+			to_mdp5_crtc_state(new_crtc_state);
+	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+	struct mdp5_interface *intf;
+	bool new_mixer = false;
+
+	new_mixer = !pipeline->mixer;
+
+	if ((need_right_mixer && !pipeline->r_mixer) ||
+	    (!need_right_mixer && pipeline->r_mixer))
+		new_mixer = true;
+
+	if (new_mixer) {
+		struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
+		struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
+		u32 caps;
+		int ret;
+
+		caps = MDP_LM_CAP_DISPLAY;
+		if (need_right_mixer)
+			caps |= MDP_LM_CAP_PAIR;
+
+		ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
+					&pipeline->mixer, need_right_mixer ?
+					&pipeline->r_mixer : NULL);
+		if (ret)
+			return ret;
+
+		mdp5_mixer_release(new_crtc_state->state, old_mixer);
+		if (old_r_mixer) {
+			mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
+			if (!need_right_mixer)
+				pipeline->r_mixer = NULL;
+		}
+	}
+
+	/*
+	 * these should have been already set up in the encoder's atomic
+	 * check (called by drm_atomic_helper_check_modeset)
+	 */
+	intf = pipeline->intf;
+
+	mdp5_cstate->err_irqmask = intf2err(intf->num);
+	mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);
+
+	if ((intf->type == INTF_DSI) &&
+	    (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
+		mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
+		mdp5_cstate->cmd_mode = true;
+	} else {
+		mdp5_cstate->pp_done_irqmask = 0;
+		mdp5_cstate->cmd_mode = false;
+	}
+
+	return 0;
+}
+
 struct plane_state {
 	struct drm_plane *plane;
 	struct mdp5_plane_state *state;
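The decision feeding mdp5_crtc_setup_pipeline() boils down to one rule: a right mixer is needed when the mode is wider than a single LM can output, or when any plane already occupies two hwpipes, and in that case the request must carry MDP_LM_CAP_PAIR so only gang-capable mixers are considered. Restated as a tiny helper (mixer_caps_for() and its parameters are stand-ins for the values atomic check reads from the mode, hw config and plane state):

    /* Sketch: build the caps mask for a mixer request. */
    static u32 mixer_caps_for(u32 mode_width, u32 lm_max_width,
                              bool plane_uses_two_hwpipes)
    {
            u32 caps = MDP_LM_CAP_DISPLAY;

            if (plane_uses_two_hwpipes || mode_width > lm_max_width)
                    caps |= MDP_LM_CAP_PAIR; /* must gang with a neighbour */

            return caps;
    }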
@@ -391,6 +530,29 @@ static bool is_fullscreen(struct drm_crtc_state *cstate,
 		((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
 }

+enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
+					struct drm_crtc_state *new_crtc_state,
+					struct drm_plane_state *bpstate)
+{
+	struct mdp5_crtc_state *mdp5_cstate =
+			to_mdp5_crtc_state(new_crtc_state);
+
+	/*
+	 * if we're in source split mode, it's mandatory to have
+	 * border out on the base stage
+	 */
+	if (mdp5_cstate->pipeline.r_mixer)
+		return STAGE0;
+
+	/* if the bottom-most layer is not fullscreen, we need to use
+	 * it for solid-color:
+	 */
+	if (!is_fullscreen(new_crtc_state, bpstate))
+		return STAGE0;
+
+	return STAGE_BASE;
+}
+
 static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
@@ -400,8 +562,12 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	struct plane_state pstates[STAGE_MAX + 1];
 	const struct mdp5_cfg_hw *hw_cfg;
 	const struct drm_plane_state *pstate;
+	const struct drm_display_mode *mode = &state->adjusted_mode;
 	bool cursor_plane = false;
-	int cnt = 0, base = 0, i;
+	bool need_right_mixer = false;
+	int cnt = 0, i;
 	int ret;
+	enum mdp_mixer_stage_id start;

 	DBG("%s: check", crtc->name);

@@ -409,32 +575,52 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 		pstates[cnt].plane = plane;
 		pstates[cnt].state = to_mdp5_plane_state(pstate);

+		/*
+		 * if any plane on this crtc uses 2 hwpipes, then we need
+		 * the crtc to have a right hwmixer.
+		 */
+		if (pstates[cnt].state->r_hwpipe)
+			need_right_mixer = true;
 		cnt++;

 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
 			cursor_plane = true;
 	}

+	/* bail out early if there aren't any planes */
+	if (!cnt)
+		return 0;
+
+	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+	/*
+	 * we need a right hwmixer if the mode's width is greater than a single
+	 * LM's max width
+	 */
+	if (mode->hdisplay > hw_cfg->lm.max_width)
+		need_right_mixer = true;
+
+	ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
+	if (ret) {
+		dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
+		return ret;
+	}
+
 	/* assign a stage based on sorted zpos property */
 	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

-	/* if the bottom-most layer is not fullscreen, we need to use
-	 * it for solid-color:
-	 */
-	if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base))
-		base++;
-
 	/* trigger a warning if cursor isn't the highest zorder */
 	WARN_ON(cursor_plane &&
 		(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));

+	start = get_start_stage(crtc, state, &pstates[0].state->base);
+
 	/* verify that there are not too many planes attached to crtc
 	 * and that we don't have conflicting mixer stages:
 	 */
-	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-
-	if ((cnt + base) >= hw_cfg->lm.nb_stages) {
-		dev_err(dev->dev, "too many planes! cnt=%d, base=%d\n", cnt, base);
+	if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
+		dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
+			cnt, start);
 		return -EINVAL;
 	}

@@ -442,7 +628,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 		if (cursor_plane && (i == (cnt - 1)))
 			pstates[i].state->stage = hw_cfg->lm.nb_stages;
 		else
-			pstates[i].state->stage = STAGE_BASE + i + base;
+			pstates[i].state->stage = start + i;
 		DBG("%s: assign pipe %s on stage=%d", crtc->name,
 			pstates[i].plane->name,
 			pstates[i].state->stage);
@@ -461,6 +647,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
 		struct drm_crtc_state *old_crtc_state)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 	struct drm_device *dev = crtc->dev;
 	unsigned long flags;

@@ -477,7 +664,8 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
 	 * it means we are trying to flush a CRTC whose state is disabled:
 	 * nothing else needs to be done.
 	 */
-	if (unlikely(!mdp5_crtc->ctl))
+	/* XXX: Can this happen now ? */
+	if (unlikely(!mdp5_cstate->ctl))
 		return;

 	blend_setup(crtc);
@@ -488,11 +676,16 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
 	 * This is safe because no pp_done will happen before SW trigger
 	 * in command mode.
 	 */
-	if (mdp5_crtc->cmd_mode)
+	if (mdp5_cstate->cmd_mode)
 		request_pp_done_pending(crtc);

 	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

+	/* XXX are we leaking out state here? */
+	mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
+	mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
+	mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;
+
 	request_pending(crtc, PENDING_FLIP);
 }

@@ -527,11 +720,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 		uint32_t width, uint32_t height)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
 	struct drm_device *dev = crtc->dev;
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct drm_gem_object *cursor_bo, *old_bo = NULL;
 	uint32_t blendcfg, stride;
 	uint64_t cursor_addr;
+	struct mdp5_ctl *ctl;
 	int ret, lm;
 	enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
 	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
@@ -544,7 +740,12 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 		return -EINVAL;
 	}

-	if (NULL == mdp5_crtc->ctl)
+	ctl = mdp5_cstate->ctl;
+	if (!ctl)
+		return -EINVAL;
+
+	/* don't support LM cursors when we we have source split enabled */
+	if (mdp5_cstate->pipeline.r_mixer)
 		return -EINVAL;

 	if (!handle) {
@@ -561,7 +762,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 	if (ret)
 		return -EINVAL;

-	lm = mdp5_crtc->lm;
+	lm = mdp5_cstate->pipeline.mixer->lm;
 	stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);

 	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
@@ -591,7 +792,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

 set_cursor:
-	ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable);
+	ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
 	if (ret) {
 		dev_err(dev->dev, "failed to %sable cursor: %d\n",
 				cursor_enable ? "en" : "dis", ret);
@@ -613,11 +814,17 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	uint32_t lm = mdp5_cstate->pipeline.mixer->lm;
 	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
 	uint32_t roi_w;
 	uint32_t roi_h;
 	unsigned long flags;

+	/* don't support LM cursors when we we have source split enabled */
+	if (mdp5_cstate->pipeline.r_mixer)
+		return -EINVAL;
+
 	/* In case the CRTC is disabled, just drop the cursor update */
 	if (unlikely(!crtc->state->enable))
 		return 0;
@@ -628,10 +835,10 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 	get_roi(crtc, &roi_w, &roi_h);

 	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
-	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
+	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
 			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
 			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
-	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm),
+	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
 			MDP5_LM_CURSOR_START_XY_Y_START(y) |
 			MDP5_LM_CURSOR_START_XY_X_START(x));
 	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
@@ -641,16 +848,80 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 	return 0;
 }

+static void
+mdp5_crtc_atomic_print_state(struct drm_printer *p,
+			     const struct drm_crtc_state *state)
+{
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
+	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+	struct mdp5_kms *mdp5_kms = get_kms(state->crtc);
+
+	if (WARN_ON(!pipeline))
+		return;
+
+	drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
+			pipeline->mixer->name : "(null)");
+
+	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
+		drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
+			   pipeline->r_mixer->name : "(null)");
+}
+
+static void mdp5_crtc_reset(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc_state *mdp5_cstate;
+
+	if (crtc->state) {
+		__drm_atomic_helper_crtc_destroy_state(crtc->state);
+		kfree(to_mdp5_crtc_state(crtc->state));
+	}
+
+	mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);
+
+	if (mdp5_cstate) {
+		mdp5_cstate->base.crtc = crtc;
+		crtc->state = &mdp5_cstate->base;
+	}
+}
+
+static struct drm_crtc_state *
+mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc_state *mdp5_cstate;
+
+	if (WARN_ON(!crtc->state))
+		return NULL;
+
+	mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
+			      sizeof(*mdp5_cstate), GFP_KERNEL);
+	if (!mdp5_cstate)
+		return NULL;
+
+	__drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);
+
+	return &mdp5_cstate->base;
+}
+
+static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
+{
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
+
+	__drm_atomic_helper_crtc_destroy_state(state);
+
+	kfree(mdp5_cstate);
+}
+
 static const struct drm_crtc_funcs mdp5_crtc_funcs = {
 	.set_config = drm_atomic_helper_set_config,
 	.destroy = mdp5_crtc_destroy,
 	.page_flip = drm_atomic_helper_page_flip,
 	.set_property = drm_atomic_helper_crtc_set_property,
-	.reset = drm_atomic_helper_crtc_reset,
-	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.reset = mdp5_crtc_reset,
+	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
+	.atomic_destroy_state = mdp5_crtc_destroy_state,
 	.cursor_set = mdp5_crtc_cursor_set,
 	.cursor_move = mdp5_crtc_cursor_move,
+	.atomic_print_state = mdp5_crtc_atomic_print_state,
 };

 static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
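The reset/duplicate/destroy trio above is the standard DRM recipe for subclassing CRTC state. The piece this diff doesn't show is the downcast helper; by convention it is a container_of() wrapper over a struct whose first member is the base state, roughly like this sketch of mdp5_crtc.h (which is not part of this hunk):

    struct mdp5_crtc_state {
            struct drm_crtc_state base;

            struct mdp5_ctl *ctl;
            struct mdp5_pipeline pipeline;
            /* ... irq masks, cmd_mode ... */
    };

    #define to_mdp5_crtc_state(x) \
                    container_of(x, struct mdp5_crtc_state, base)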
@@ -658,9 +929,10 @@ static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
 	.destroy = mdp5_crtc_destroy,
 	.page_flip = drm_atomic_helper_page_flip,
 	.set_property = drm_atomic_helper_crtc_set_property,
-	.reset = drm_atomic_helper_crtc_reset,
-	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.reset = mdp5_crtc_reset,
+	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
+	.atomic_destroy_state = mdp5_crtc_destroy_state,
+	.atomic_print_state = mdp5_crtc_atomic_print_state,
 };

 static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
@@ -710,22 +982,26 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 	int ret;

 	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
 						msecs_to_jiffies(50));
 	if (ret == 0)
-		dev_warn(dev->dev, "pp done time out, lm=%d\n", mdp5_crtc->lm);
+		dev_warn(dev->dev, "pp done time out, lm=%d\n",
+			 mdp5_cstate->pipeline.mixer->lm);
 }

 static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
 	int ret;

 	/* Should not call this function if crtc is disabled. */
-	if (!mdp5_crtc->ctl)
+	if (!ctl)
 		return;

 	ret = drm_crtc_vblank_get(crtc);
@@ -733,7 +1009,7 @@ static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
 		return;

 	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
-		((mdp5_ctl_get_commit_status(mdp5_crtc->ctl) &
+		((mdp5_ctl_get_commit_status(ctl) &
 		mdp5_crtc->flushed_mask) == 0),
 		msecs_to_jiffies(50));
 	if (ret <= 0)
@@ -750,52 +1026,54 @@ uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
 	return mdp5_crtc->vblank.irqmask;
 }

-void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
-		struct mdp5_interface *intf, struct mdp5_ctl *ctl)
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
 {
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
-	int lm = mdp5_crtc_get_lm(crtc);

-	/* now that we know what irq's we want: */
-	mdp5_crtc->err.irqmask = intf2err(intf->num);
-	mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);
-
-	if ((intf->type == INTF_DSI) &&
-		(intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
-		mdp5_crtc->pp_done.irqmask = lm2ppdone(lm);
-		mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
-		mdp5_crtc->cmd_mode = true;
-	} else {
-		mdp5_crtc->pp_done.irqmask = 0;
-		mdp5_crtc->pp_done.irq = NULL;
-		mdp5_crtc->cmd_mode = false;
-	}
-
 	/* should this be done elsewhere ? */
 	mdp_irq_update(&mdp5_kms->base);

-	mdp5_crtc->ctl = ctl;
-	mdp5_ctl_set_pipeline(ctl, intf, lm);
+	mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
 }

 struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
 {
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

-	return mdp5_crtc->ctl;
+	return mdp5_cstate->ctl;
 }

-int mdp5_crtc_get_lm(struct drm_crtc *crtc)
+struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
 {
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
-	return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
+	struct mdp5_crtc_state *mdp5_cstate;
+
+	if (WARN_ON(!crtc))
+		return ERR_PTR(-EINVAL);
+
+	mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+
+	return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
+		ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
 }

+struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc_state *mdp5_cstate;
+
+	if (WARN_ON(!crtc))
+		return ERR_PTR(-EINVAL);
+
+	mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+
+	return &mdp5_cstate->pipeline;
+}
+
 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
 {
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

-	if (mdp5_crtc->cmd_mode)
+	if (mdp5_cstate->cmd_mode)
 		mdp5_crtc_wait_for_pp_done(crtc);
 	else
 		mdp5_crtc_wait_for_flush_done(crtc);
@@ -816,7 +1094,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 	crtc = &mdp5_crtc->base;

 	mdp5_crtc->id = id;
-	mdp5_crtc->lm = GET_LM_ID(id);

 	spin_lock_init(&mdp5_crtc->lm_lock);
 	spin_lock_init(&mdp5_crtc->cursor.lock);
@@ -824,6 +1101,7 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,

 	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
 	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
+	mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;

 	if (cursor_plane)
 		drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
@@ -32,24 +32,16 @@
 #define CTL_STAT_BUSY	0x1
 #define CTL_STAT_BOOKED	0x2

-struct op_mode {
-	struct mdp5_interface intf;
-
-	bool encoder_enabled;
-	uint32_t start_mask;
-};
-
 struct mdp5_ctl {
 	struct mdp5_ctl_manager *ctlm;

 	u32 id;
-	int lm;

 	/* CTL status bitmask */
 	u32 status;

-	/* Operation Mode Configuration for the Pipeline */
-	struct op_mode pipeline;
+	bool encoder_enabled;
+	uint32_t start_mask;

 	/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
 	spinlock_t hw_lock;
@@ -146,9 +138,10 @@ static void set_display_intf(struct mdp5_kms *mdp5_kms,
 	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
 }

-static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
+static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
 {
 	unsigned long flags;
+	struct mdp5_interface *intf = pipeline->intf;
 	u32 ctl_op = 0;

 	if (!mdp5_cfg_intf_is_virtual(intf->type))
@ -169,52 +162,50 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
|
||||
break;
|
||||
}
|
||||
|
||||
if (pipeline->r_mixer)
|
||||
ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE |
|
||||
MDP5_CTL_OP_PACK_3D(1);
|
||||
|
||||
spin_lock_irqsave(&ctl->hw_lock, flags);
|
||||
ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
|
||||
spin_unlock_irqrestore(&ctl->hw_lock, flags);
|
||||
}
|
||||
|
||||
int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
|
||||
struct mdp5_interface *intf, int lm)
|
||||
int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
|
||||
{
|
||||
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
|
||||
struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
|
||||
struct mdp5_interface *intf = pipeline->intf;
|
||||
struct mdp5_hw_mixer *mixer = pipeline->mixer;
|
||||
struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
|
||||
|
||||
if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) {
|
||||
dev_err(mdp5_kms->dev->dev,
|
||||
"CTL %d is allocated by INTF %d, but used by INTF %d\n",
|
||||
ctl->id, ctl->pipeline.intf.num, intf->num);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ctl->lm = lm;
|
||||
|
||||
memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
|
||||
|
||||
ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
|
||||
mdp_ctl_flush_mask_encoder(intf);
|
||||
ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm) |
|
||||
mdp_ctl_flush_mask_encoder(intf);
|
||||
if (r_mixer)
|
||||
ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
|
||||
|
||||
/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
|
||||
if (!mdp5_cfg_intf_is_virtual(intf->type))
|
||||
set_display_intf(mdp5_kms, intf);
|
||||
|
||||
set_ctl_op(ctl, intf);
|
||||
set_ctl_op(ctl, pipeline);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool start_signal_needed(struct mdp5_ctl *ctl)
|
||||
static bool start_signal_needed(struct mdp5_ctl *ctl,
|
||||
struct mdp5_pipeline *pipeline)
|
||||
{
|
||||
struct op_mode *pipeline = &ctl->pipeline;
|
||||
struct mdp5_interface *intf = pipeline->intf;
|
||||
|
||||
if (!pipeline->encoder_enabled || pipeline->start_mask != 0)
|
||||
if (!ctl->encoder_enabled || ctl->start_mask != 0)
|
||||
return false;
|
||||
|
||||
switch (pipeline->intf.type) {
|
||||
switch (intf->type) {
|
||||
case INTF_WB:
|
||||
return true;
|
||||
case INTF_DSI:
|
||||
return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND;
|
||||
return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
@ -236,19 +227,23 @@ static void send_start_signal(struct mdp5_ctl *ctl)
|
||||
spin_unlock_irqrestore(&ctl->hw_lock, flags);
|
||||
}
|
||||
|
||||
static void refill_start_mask(struct mdp5_ctl *ctl)
|
||||
static void refill_start_mask(struct mdp5_ctl *ctl,
|
||||
struct mdp5_pipeline *pipeline)
|
||||
{
|
||||
struct op_mode *pipeline = &ctl->pipeline;
|
||||
struct mdp5_interface *intf = &ctl->pipeline.intf;
|
||||
struct mdp5_interface *intf = pipeline->intf;
|
||||
struct mdp5_hw_mixer *mixer = pipeline->mixer;
|
||||
struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
|
||||
|
||||
pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);
|
||||
ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm);
|
||||
if (r_mixer)
|
||||
ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
|
||||
|
||||
/*
|
||||
* Writeback encoder needs to program & flush
|
||||
* address registers for each page flip..
|
||||
*/
|
||||
if (intf->type == INTF_WB)
|
||||
pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf);
|
||||
ctl->start_mask |= mdp_ctl_flush_mask_encoder(intf);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -259,17 +254,21 @@ static void refill_start_mask(struct mdp5_ctl *ctl)
|
||||
* Note:
|
||||
* This encoder state is needed to trigger START signal (data path kickoff).
|
||||
*/
|
||||
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
|
||||
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
|
||||
struct mdp5_pipeline *pipeline,
|
||||
bool enabled)
|
||||
{
|
||||
struct mdp5_interface *intf = pipeline->intf;
|
||||
|
||||
if (WARN_ON(!ctl))
|
||||
return -EINVAL;
|
||||
|
||||
ctl->pipeline.encoder_enabled = enabled;
|
||||
DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off");
|
||||
ctl->encoder_enabled = enabled;
|
||||
DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");
|
||||
|
||||
if (start_signal_needed(ctl)) {
|
||||
if (start_signal_needed(ctl, pipeline)) {
|
||||
send_start_signal(ctl);
|
||||
refill_start_mask(ctl);
|
||||
refill_start_mask(ctl, pipeline);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -280,29 +279,35 @@ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
|
||||
* CTL registers need to be flushed after calling this function
|
||||
* (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
|
||||
*/
|
||||
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
|
||||
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
|
||||
int cursor_id, bool enable)
|
||||
{
|
||||
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
|
||||
unsigned long flags;
|
||||
u32 blend_cfg;
|
||||
int lm = ctl->lm;
|
||||
struct mdp5_hw_mixer *mixer = pipeline->mixer;
|
||||
|
||||
if (unlikely(WARN_ON(lm < 0))) {
|
||||
dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
|
||||
ctl->id, lm);
|
||||
if (unlikely(WARN_ON(!mixer))) {
|
||||
dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
|
||||
ctl->id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (pipeline->r_mixer) {
|
||||
dev_err(ctl_mgr->dev->dev, "unsupported configuration");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&ctl->hw_lock, flags);
|
||||
|
||||
blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));
|
||||
blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));
|
||||
|
||||
if (enable)
|
||||
blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
|
||||
else
|
||||
blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
|
||||
|
||||
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
|
||||
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
|
||||
ctl->cursor_on = enable;
|
||||
|
||||
spin_unlock_irqrestore(&ctl->hw_lock, flags);
|
||||
@ -355,37 +360,88 @@ static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
|
||||
}
|
||||
}
|
||||
|
||||
int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
|
||||
u32 ctl_blend_op_flags)
|
||||
static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
|
||||
int i;
|
||||
|
||||
spin_lock_irqsave(&ctl->hw_lock, flags);
|
||||
|
||||
for (i = 0; i < ctl_mgr->nlm; i++) {
|
||||
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
|
||||
ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&ctl->hw_lock, flags);
|
||||
}
|
||||
|
||||
#define PIPE_LEFT 0
|
||||
#define PIPE_RIGHT 1
|
||||
int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
|
||||
enum mdp5_pipe stage[][MAX_PIPE_STAGE],
|
||||
enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
|
||||
u32 stage_cnt, u32 ctl_blend_op_flags)
|
||||
{
|
||||
struct mdp5_hw_mixer *mixer = pipeline->mixer;
|
||||
struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
|
||||
unsigned long flags;
|
||||
u32 blend_cfg = 0, blend_ext_cfg = 0;
|
||||
u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
|
||||
int i, start_stage;
|
||||
|
||||
mdp5_ctl_reset_blend_regs(ctl);
|
||||
|
||||
if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
|
||||
start_stage = STAGE0;
|
||||
blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
|
||||
if (r_mixer)
|
||||
r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
|
||||
} else {
|
||||
start_stage = STAGE_BASE;
|
||||
}
|
||||
|
||||
for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
|
||||
blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
|
||||
blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
|
||||
blend_cfg |=
|
||||
mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
|
||||
mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
|
||||
blend_ext_cfg |=
|
||||
mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
|
||||
mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
|
||||
if (r_mixer) {
|
||||
r_blend_cfg |=
|
||||
mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
|
||||
mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
|
||||
r_blend_ext_cfg |=
|
||||
mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
|
||||
mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
|
||||
}
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&ctl->hw_lock, flags);
|
||||
if (ctl->cursor_on)
|
||||
blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
|
||||
|
||||
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
|
||||
ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
|
||||
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
|
||||
ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
|
||||
blend_ext_cfg);
|
||||
if (r_mixer) {
|
||||
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
|
||||
r_blend_cfg);
|
||||
ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
|
||||
r_blend_ext_cfg);
|
||||
}
|
||||
spin_unlock_irqrestore(&ctl->hw_lock, flags);
|
||||
|
||||
ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);
|
||||
ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
|
||||
if (r_mixer)
|
||||
ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);
|
||||
|
||||
DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
|
||||
DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
|
||||
blend_cfg, blend_ext_cfg);
|
||||
if (r_mixer)
|
||||
DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
|
||||
r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -443,7 +499,8 @@ u32 mdp_ctl_flush_mask_lm(int lm)
|
||||
}
|
||||
}
|
||||
|
||||
static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
|
||||
static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
|
||||
u32 flush_mask)
|
||||
{
|
||||
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
|
||||
u32 sw_mask = 0;
|
||||
@ -452,7 +509,7 @@ static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
|
||||
|
||||
/* for some targets, cursor bit is the same as LM bit */
|
||||
if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
|
||||
sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);
|
||||
sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);
|
||||
|
||||
return sw_mask;
|
||||
}
|
||||
@ -498,25 +555,26 @@ static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
|
||||
*
|
||||
* Return H/W flushed bit mask.
|
||||
*/
|
||||
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
|
||||
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
|
||||
struct mdp5_pipeline *pipeline,
|
||||
u32 flush_mask)
|
||||
{
|
||||
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
|
||||
struct op_mode *pipeline = &ctl->pipeline;
|
||||
unsigned long flags;
|
||||
u32 flush_id = ctl->id;
|
||||
u32 curr_ctl_flush_mask;
|
||||
|
||||
pipeline->start_mask &= ~flush_mask;
|
||||
ctl->start_mask &= ~flush_mask;
|
||||
|
||||
VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
|
||||
pipeline->start_mask, ctl->pending_ctl_trigger);
|
||||
ctl->start_mask, ctl->pending_ctl_trigger);
|
||||
|
||||
if (ctl->pending_ctl_trigger & flush_mask) {
|
||||
flush_mask |= MDP5_CTL_FLUSH_CTL;
|
||||
ctl->pending_ctl_trigger = 0;
|
||||
}
|
||||
|
||||
flush_mask |= fix_sw_flush(ctl, flush_mask);
|
||||
flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);
|
||||
|
||||
flush_mask &= ctl_mgr->flush_hw_mask;
|
||||
|
||||
@ -530,9 +588,9 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
|
||||
spin_unlock_irqrestore(&ctl->hw_lock, flags);
|
||||
}
|
||||
|
||||
if (start_signal_needed(ctl)) {
|
||||
if (start_signal_needed(ctl, pipeline)) {
|
||||
send_start_signal(ctl);
|
||||
refill_start_mask(ctl);
|
||||
refill_start_mask(ctl, pipeline);
|
||||
}
|
||||
|
||||
return curr_ctl_flush_mask;
|
||||
@ -619,8 +677,6 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
|
||||
|
||||
found:
|
||||
ctl = &ctl_mgr->ctls[c];
|
||||
ctl->pipeline.intf.num = intf_num;
|
||||
ctl->lm = -1;
|
||||
ctl->status |= CTL_STAT_BUSY;
|
||||
ctl->pending_ctl_trigger = 0;
|
||||
DBG("CTL %d allocated", ctl->id);
|
||||
|
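The START-signal bookkeeping above is plain mask arithmetic: mdp5_ctl_set_pipeline() and refill_start_mask() arm ctl->start_mask with the flush bits of every LM in the pipeline (plus the encoder bit for writeback), mdp5_ctl_commit() clears bits as they are flushed, and start_signal_needed() fires only once the mask drains to zero while the encoder is enabled. A stand-alone toy model of one commit cycle (not driver code):

struct toy_ctl { u32 start_mask; bool encoder_enabled; };

/* Returns true when the caller should send the START signal. */
static bool toy_commit(struct toy_ctl *ctl, u32 flush_mask, u32 refill)
{
	ctl->start_mask &= ~flush_mask;	/* these blocks got flushed */
	if (!ctl->encoder_enabled || ctl->start_mask != 0)
		return false;		/* still waiting on other flushes */
	ctl->start_mask = refill;	/* re-arm for the next frame */
	return true;
}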
@ -37,13 +37,17 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num);
int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);

struct mdp5_interface;
int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
		int lm);
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
struct mdp5_pipeline;
int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *p);
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, struct mdp5_pipeline *p,
			       bool enabled);

int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
			int cursor_id, bool enable);
int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);

#define MAX_PIPE_STAGE 2

/*
 * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
 *
@ -56,8 +60,10 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT	BIT(0)
int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
		u32 ctl_blend_op_flags);
int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
		   enum mdp5_pipe stage[][MAX_PIPE_STAGE],
		   enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
		   u32 stage_cnt, u32 ctl_blend_op_flags);

/**
 * mdp_ctl_flush_mask...() - Register FLUSH masks
@ -71,7 +77,8 @@ u32 mdp_ctl_flush_mask_cursor(int cursor_id);
u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);

/* @flush_mask: see CTL flush masks definitions below */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
		    u32 flush_mask);
u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
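mdp5_ctl_blend() now takes two-dimensional stage arrays: each blend stage can carry a left and a right pipe (the two halves of a ganged plane), and the parallel r_stage array feeds the right mixer when source split is active. A hedged sketch of a call site; the pipe and stage values are illustrative, and indices 0/1 mirror the PIPE_LEFT/PIPE_RIGHT defines from mdp5_ctl.c:

/* Hypothetical call site, e.g. in a CRTC's blend setup: */
enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };

stage[STAGE0][0] = SSPP_VIG0;	/* left half of a wide plane */
stage[STAGE0][1] = SSPP_VIG1;	/* right half, staged at the same level */

mdp5_ctl_blend(ctl, pipeline, stage, r_stage, 1,
	       MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT);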
@ -109,7 +109,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
	struct mdp5_kms *mdp5_kms = get_kms(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_connector *connector;
	int intf = mdp5_encoder->intf.num;
	int intf = mdp5_encoder->intf->num;
	uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
	uint32_t display_v_start, display_v_end;
	uint32_t hsync_start_x, hsync_end_x;
@ -130,7 +130,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
	ctrl_pol = 0;

	/* DSI controller cannot handle active-low sync signals. */
	if (mdp5_encoder->intf.type != INTF_DSI) {
	if (mdp5_encoder->intf->type != INTF_DSI) {
		if (mode->flags & DRM_MODE_FLAG_NHSYNC)
			ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
		if (mode->flags & DRM_MODE_FLAG_NVSYNC)
@ -175,7 +175,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
	 * DISPLAY_V_START = (VBP * HCYCLE) + HBP
	 * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
	 */
	if (mdp5_encoder->intf.type == INTF_eDP) {
	if (mdp5_encoder->intf->type == INTF_eDP) {
		display_v_start += mode->htotal - mode->hsync_start;
		display_v_end -= mode->hsync_start - mode->hdisplay;
	}
@ -206,8 +206,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,

	spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);

	mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_encoder->intf,
			       mdp5_encoder->ctl);
	mdp5_crtc_set_pipeline(encoder->crtc);
}

static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
@ -215,20 +214,21 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
	struct mdp5_kms *mdp5_kms = get_kms(encoder);
	struct mdp5_ctl *ctl = mdp5_encoder->ctl;
	int lm = mdp5_crtc_get_lm(encoder->crtc);
	struct mdp5_interface *intf = &mdp5_encoder->intf;
	int intfn = mdp5_encoder->intf.num;
	struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
	struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
	struct mdp5_interface *intf = mdp5_encoder->intf;
	int intfn = mdp5_encoder->intf->num;
	unsigned long flags;

	if (WARN_ON(!mdp5_encoder->enabled))
		return;

	mdp5_ctl_set_encoder_state(ctl, false);
	mdp5_ctl_set_encoder_state(ctl, pipeline, false);

	spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
	spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));

	/*
	 * Wait for a vsync so we know the ENABLE=0 latched before
@ -238,7 +238,7 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
	 * the settings changes for the new modeset (like new
	 * scanout buffer) don't latch properly..
	 */
	mdp_irq_wait(&mdp5_kms->base, intf2vblank(lm, intf));
	mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf));

	bs_set(mdp5_encoder, 0);

@ -250,8 +250,9 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
	struct mdp5_kms *mdp5_kms = get_kms(encoder);
	struct mdp5_ctl *ctl = mdp5_encoder->ctl;
	struct mdp5_interface *intf = &mdp5_encoder->intf;
	int intfn = mdp5_encoder->intf.num;
	struct mdp5_interface *intf = mdp5_encoder->intf;
	struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
	int intfn = intf->num;
	unsigned long flags;

	if (WARN_ON(mdp5_encoder->enabled))
@ -261,9 +262,9 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
	spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
	spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));

	mdp5_ctl_set_encoder_state(ctl, true);
	mdp5_ctl_set_encoder_state(ctl, pipeline, true);

	mdp5_encoder->enabled = true;
}
@ -273,7 +274,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
				  struct drm_display_mode *adjusted_mode)
{
	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
	struct mdp5_interface *intf = &mdp5_encoder->intf;
	struct mdp5_interface *intf = mdp5_encoder->intf;

	if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
		mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode);
@ -284,7 +285,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
static void mdp5_encoder_disable(struct drm_encoder *encoder)
{
	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
	struct mdp5_interface *intf = &mdp5_encoder->intf;
	struct mdp5_interface *intf = mdp5_encoder->intf;

	if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
		mdp5_cmd_encoder_disable(encoder);
@ -295,7 +296,7 @@ static void mdp5_encoder_disable(struct drm_encoder *encoder)
static void mdp5_encoder_enable(struct drm_encoder *encoder)
{
	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
	struct mdp5_interface *intf = &mdp5_encoder->intf;
	struct mdp5_interface *intf = mdp5_encoder->intf;

	if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
		mdp5_cmd_encoder_disable(encoder);
@ -303,17 +304,33 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
		mdp5_vid_encoder_enable(encoder);
}

static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
				     struct drm_crtc_state *crtc_state,
				     struct drm_connector_state *conn_state)
{
	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state);
	struct mdp5_interface *intf = mdp5_encoder->intf;
	struct mdp5_ctl *ctl = mdp5_encoder->ctl;

	mdp5_cstate->ctl = ctl;
	mdp5_cstate->pipeline.intf = intf;

	return 0;
}

static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
	.mode_set = mdp5_encoder_mode_set,
	.disable = mdp5_encoder_disable,
	.enable = mdp5_encoder_enable,
	.atomic_check = mdp5_encoder_atomic_check,
};

int mdp5_encoder_get_linecount(struct drm_encoder *encoder)
{
	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
	struct mdp5_kms *mdp5_kms = get_kms(encoder);
	int intf = mdp5_encoder->intf.num;
	int intf = mdp5_encoder->intf->num;

	return mdp5_read(mdp5_kms, REG_MDP5_INTF_LINE_COUNT(intf));
}
@ -322,7 +339,7 @@ u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder)
{
	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
	struct mdp5_kms *mdp5_kms = get_kms(encoder);
	int intf = mdp5_encoder->intf.num;
	int intf = mdp5_encoder->intf->num;

	return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf));
}
@ -340,7 +357,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
		return -EINVAL;

	mdp5_kms = get_kms(encoder);
	intf_num = mdp5_encoder->intf.num;
	intf_num = mdp5_encoder->intf->num;

	/* Switch slave encoder's TimingGen Sync mode,
	 * to use the master's enable signal for the slave encoder.
@ -369,7 +386,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode)
{
	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
	struct mdp5_interface *intf = &mdp5_encoder->intf;
	struct mdp5_interface *intf = mdp5_encoder->intf;

	/* TODO: Expand this to set writeback modes too */
	if (cmd_mode) {
@ -385,7 +402,8 @@ void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode)

/* initialize encoder */
struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
		struct mdp5_interface *intf, struct mdp5_ctl *ctl)
				      struct mdp5_interface *intf,
				      struct mdp5_ctl *ctl)
{
	struct drm_encoder *encoder = NULL;
	struct mdp5_encoder *mdp5_encoder;
@ -399,9 +417,9 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
		goto fail;
	}

	memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf));
	encoder = &mdp5_encoder->base;
	mdp5_encoder->ctl = ctl;
	mdp5_encoder->intf = intf;

	spin_lock_init(&mdp5_encoder->intf_lock);
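Since struct mdp5_encoder now keeps a pointer to an mdp5_interface owned by mdp5_kms (allocated once in interface_init(), below) instead of a private copy, constructing an encoder just hands over that long-lived object, and mode changes made through mdp5_encoder_set_intf_mode() become visible to everyone sharing it. A hedged init-path sketch, error handling omitted:

/* Hypothetical excerpt: pair a kms-owned interface with a freshly
 * requested CTL and build the encoder around them.
 */
struct mdp5_interface *intf = mdp5_kms->intfs[0];
struct mdp5_ctl *ctl = mdp5_ctlm_request(mdp5_kms->ctlm, intf->num);
struct drm_encoder *encoder = mdp5_encoder_init(dev, intf, ctl);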
@ -93,6 +93,7 @@ struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s)

	/* Copy state: */
	new_state->hwpipe = mdp5_kms->state->hwpipe;
	new_state->hwmixer = mdp5_kms->state->hwmixer;
	if (mdp5_kms->smp)
		new_state->smp = mdp5_kms->state->smp;

@ -165,13 +166,16 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
	struct msm_gem_address_space *aspace = mdp5_kms->aspace;
	int i;

	for (i = 0; i < mdp5_kms->num_hwmixers; i++)
		mdp5_mixer_destroy(mdp5_kms->hwmixers[i]);

	for (i = 0; i < mdp5_kms->num_hwpipes; i++)
		mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);

	if (aspace) {
		aspace->mmu->funcs->detach(aspace->mmu,
				iommu_ports, ARRAY_SIZE(iommu_ports));
		msm_gem_address_space_destroy(aspace);
		msm_gem_address_space_put(aspace);
	}
}

@ -268,19 +272,14 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
}

static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
		enum mdp5_intf_type intf_type, int intf_num,
		struct mdp5_ctl *ctl)
					     struct mdp5_interface *intf,
					     struct mdp5_ctl *ctl)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct mdp5_interface intf = {
			.num = intf_num,
			.type = intf_type,
			.mode = MDP5_INTF_MODE_NONE,
	};

	encoder = mdp5_encoder_init(dev, &intf, ctl);
	encoder = mdp5_encoder_init(dev, intf, ctl);
	if (IS_ERR(encoder)) {
		dev_err(dev->dev, "failed to construct encoder\n");
		return encoder;
@ -309,32 +308,28 @@ static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
	return -EINVAL;
}

static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
			     struct mdp5_interface *intf)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	const struct mdp5_cfg_hw *hw_cfg =
					mdp5_cfg_get_hw_config(mdp5_kms->cfg);
	enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num];
	struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
	struct mdp5_ctl *ctl;
	struct drm_encoder *encoder;
	int ret = 0;

	switch (intf_type) {
	case INTF_DISABLED:
		break;
	switch (intf->type) {
	case INTF_eDP:
		if (!priv->edp)
			break;

		ctl = mdp5_ctlm_request(ctlm, intf_num);
		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, ctl);
		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
@ -346,13 +341,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
		if (!priv->hdmi)
			break;

		ctl = mdp5_ctlm_request(ctlm, intf_num);
		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, ctl);
		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
@ -362,11 +357,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
		break;
	case INTF_DSI:
	{
		int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num);
		const struct mdp5_cfg_hw *hw_cfg =
			mdp5_cfg_get_hw_config(mdp5_kms->cfg);
		int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);

		if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
			dev_err(dev->dev, "failed to find dsi from intf %d\n",
				intf_num);
				intf->num);
			ret = -EINVAL;
			break;
		}
@ -374,13 +371,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
		if (!priv->dsi[dsi_id])
			break;

		ctl = mdp5_ctlm_request(ctlm, intf_num);
		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, INTF_DSI, intf_num, ctl);
		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
@ -390,7 +387,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
		break;
	}
	default:
		dev_err(dev->dev, "unknown intf: %d\n", intf_type);
		dev_err(dev->dev, "unknown intf: %d\n", intf->type);
		ret = -EINVAL;
		break;
	}
@ -414,8 +411,8 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
	 * Construct encoders and modeset initialize connector devices
	 * for each external display interface.
	 */
	for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
		ret = modeset_init_intf(mdp5_kms, i);
	for (i = 0; i < mdp5_kms->num_intfs; i++) {
		ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]);
		if (ret)
			goto fail;
	}
@ -425,7 +422,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
	 * the MDP5 interfaces) than the number of layer mixers present in HW,
	 * but let's be safe here anyway
	 */
	num_crtcs = min(priv->num_encoders, mdp5_cfg->lm.count);
	num_crtcs = min(priv->num_encoders, mdp5_kms->num_hwmixers);

	/*
	 * Construct planes equaling the number of hw pipes, and CRTCs for the
@ -744,6 +741,7 @@ fail:
static void mdp5_destroy(struct platform_device *pdev)
{
	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
	int i;

	if (mdp5_kms->ctlm)
		mdp5_ctlm_destroy(mdp5_kms->ctlm);
@ -752,6 +750,9 @@ static void mdp5_destroy(struct platform_device *pdev)
	if (mdp5_kms->cfg)
		mdp5_cfg_destroy(mdp5_kms->cfg);

	for (i = 0; i < mdp5_kms->num_intfs; i++)
		kfree(mdp5_kms->intfs[i]);

	if (mdp5_kms->rpm_enabled)
		pm_runtime_disable(&pdev->dev);

@ -829,6 +830,64 @@ static int hwpipe_init(struct mdp5_kms *mdp5_kms)
	return 0;
}

static int hwmixer_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	const struct mdp5_cfg_hw *hw_cfg;
	int i, ret;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	for (i = 0; i < hw_cfg->lm.count; i++) {
		struct mdp5_hw_mixer *mixer;

		mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
		if (IS_ERR(mixer)) {
			ret = PTR_ERR(mixer);
			dev_err(dev->dev, "failed to construct LM%d (%d)\n",
				i, ret);
			return ret;
		}

		mixer->idx = mdp5_kms->num_hwmixers;
		mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
	}

	return 0;
}

static int interface_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	const struct mdp5_cfg_hw *hw_cfg;
	const enum mdp5_intf_type *intf_types;
	int i;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
	intf_types = hw_cfg->intf.connect;

	for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
		struct mdp5_interface *intf;

		if (intf_types[i] == INTF_DISABLED)
			continue;

		intf = kzalloc(sizeof(*intf), GFP_KERNEL);
		if (!intf) {
			dev_err(dev->dev, "failed to construct INTF%d\n", i);
			return -ENOMEM;
		}

		intf->num = i;
		intf->type = intf_types[i];
		intf->mode = MDP5_INTF_MODE_NONE;
		intf->idx = mdp5_kms->num_intfs;
		mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf;
	}

	return 0;
}

static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
@ -929,6 +988,14 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
	if (ret)
		goto fail;

	ret = hwmixer_init(mdp5_kms);
	if (ret)
		goto fail;

	ret = interface_init(mdp5_kms);
	if (ret)
		goto fail;

	/* set uninit-ed kms */
	priv->kms = &mdp5_kms->base.base;
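hwmixer_init() and interface_init() flatten the static hw_cfg tables into dense, dynamically indexed arrays: mixer->idx is the position in hwmixers[] while mixer->lm keeps the hardware instance number, and disabled INTFs are skipped so intfs[] has no holes for modeset_init() to step around. A toy restatement of that packing invariant (not driver code):

enum toy_intf_type { TOY_DISABLED, TOY_eDP, TOY_DSI, TOY_HDMI };

struct toy_intf { int num; enum toy_intf_type type; };

/* Pack only connected interfaces: the hw interface number survives in
 * ->num while the output array index stays dense.
 */
static unsigned toy_pack_intfs(const enum toy_intf_type *connect,
			       unsigned n, struct toy_intf *out)
{
	unsigned i, count = 0;

	for (i = 0; i < n; i++) {
		if (connect[i] == TOY_DISABLED)
			continue;		/* leave no hole in out[] */
		out[count].num = i;		/* hw interface number kept */
		out[count].type = connect[i];
		count++;
	}
	return count;
}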
@ -23,8 +23,9 @@
#include "mdp/mdp_kms.h"
#include "mdp5_cfg.h"	/* must be included before mdp5.xml.h */
#include "mdp5.xml.h"
#include "mdp5_ctl.h"
#include "mdp5_pipe.h"
#include "mdp5_mixer.h"
#include "mdp5_ctl.h"
#include "mdp5_smp.h"

struct mdp5_state;
@ -39,6 +40,12 @@ struct mdp5_kms {
	unsigned num_hwpipes;
	struct mdp5_hw_pipe *hwpipes[SSPP_MAX];

	unsigned num_hwmixers;
	struct mdp5_hw_mixer *hwmixers[8];

	unsigned num_intfs;
	struct mdp5_interface *intfs[5];

	struct mdp5_cfg_handler *cfg;
	uint32_t caps;	/* MDP capabilities (MDP_CAP_XXX bits) */

@ -83,6 +90,7 @@ struct mdp5_kms {
 */
struct mdp5_state {
	struct mdp5_hw_pipe_state hwpipe;
	struct mdp5_hw_mixer_state hwmixer;
	struct mdp5_smp_state smp;
};

@ -96,6 +104,7 @@ struct mdp5_plane_state {
	struct drm_plane_state base;

	struct mdp5_hw_pipe *hwpipe;
	struct mdp5_hw_pipe *r_hwpipe;	/* right hwpipe */

	/* aligned with property */
	uint8_t premultiplied;
@ -108,6 +117,28 @@ struct mdp5_plane_state {
#define to_mdp5_plane_state(x) \
		container_of(x, struct mdp5_plane_state, base)

struct mdp5_pipeline {
	struct mdp5_interface *intf;
	struct mdp5_hw_mixer *mixer;
	struct mdp5_hw_mixer *r_mixer;	/* right mixer */
};

struct mdp5_crtc_state {
	struct drm_crtc_state base;

	struct mdp5_ctl *ctl;
	struct mdp5_pipeline pipeline;

	/* these are derivatives of intf/mixer state in mdp5_pipeline */
	u32 vblank_irqmask;
	u32 err_irqmask;
	u32 pp_done_irqmask;

	bool cmd_mode;
};
#define to_mdp5_crtc_state(x) \
		container_of(x, struct mdp5_crtc_state, base)

enum mdp5_intf_mode {
	MDP5_INTF_MODE_NONE = 0,

@ -121,6 +152,7 @@ enum mdp5_intf_mode {
};

struct mdp5_interface {
	int idx;
	int num;	/* display interface number */
	enum mdp5_intf_type type;
	enum mdp5_intf_mode mode;
@ -128,11 +160,11 @@ struct mdp5_interface {

struct mdp5_encoder {
	struct drm_encoder base;
	struct mdp5_interface intf;
	spinlock_t intf_lock;	/* protect REG_MDP5_INTF_* registers */
	bool enabled;
	uint32_t bsc;

	struct mdp5_interface *intf;
	struct mdp5_ctl *ctl;
};
#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
@ -197,8 +229,8 @@ static inline uint32_t intf2err(int intf_num)
	}
}

#define GET_PING_PONG_ID(layer_mixer)	((layer_mixer == 5) ? 3 : layer_mixer)
static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf)
static inline uint32_t intf2vblank(struct mdp5_hw_mixer *mixer,
				   struct mdp5_interface *intf)
{
	/*
	 * In case of DSI Command Mode, the Ping Pong's read pointer IRQ
@ -208,7 +240,7 @@ static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf)

	if ((intf->type == INTF_DSI) &&
		(intf->mode == MDP5_INTF_DSI_MODE_COMMAND))
		return MDP5_IRQ_PING_PONG_0_RD_PTR << GET_PING_PONG_ID(lm);
		return MDP5_IRQ_PING_PONG_0_RD_PTR << mixer->pp;

	if (intf->type == INTF_WB)
		return MDP5_IRQ_WB_2_DONE;
@ -222,9 +254,9 @@ static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf)
	}
}

static inline uint32_t lm2ppdone(int lm)
static inline uint32_t lm2ppdone(struct mdp5_hw_mixer *mixer)
{
	return MDP5_IRQ_PING_PONG_0_DONE << GET_PING_PONG_ID(lm);
	return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp;
}

int mdp5_disable(struct mdp5_kms *mdp5_kms);
@ -243,15 +275,16 @@ void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);

uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
				  enum drm_plane_type type);

struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);

int mdp5_crtc_get_lm(struct drm_crtc *crtc);
void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc);
struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc);
void mdp5_crtc_set_pipeline(struct drm_crtc *crtc);
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
				struct drm_plane *plane,
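Replacing the hard-coded GET_PING_PONG_ID(lm) macro with mixer->pp makes the LM-to-PingPong mapping data-driven: the special case that sent LM5 to PP3 now comes from the per-instance hw config. A short worked example, with illustrative values:

/* A mixer carrying pp = 3 (what LM5 used to resolve to via the old
 * macro) gives, for a DSI command-mode interface:
 *
 *	intf2vblank(mixer, intf) == MDP5_IRQ_PING_PONG_0_RD_PTR << 3
 *
 * i.e. the PING_PONG_3 read-pointer IRQ bit, and likewise
 * lm2ppdone(mixer) == MDP5_IRQ_PING_PONG_0_DONE << 3.
 */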
drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c (new file, 172 lines)
@ -0,0 +1,172 @@
/*
 * Copyright (C) 2017 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp5_kms.h"

/*
 * As of now, there are only 2 combinations possible for source split:
 *
 * Left | Right
 * -----|------
 *  LM0 | LM1
 *  LM2 | LM5
 *
 */
static int lm_right_pair[] = { 1, -1, 5, -1, -1, -1 };

static int get_right_pair_idx(struct mdp5_kms *mdp5_kms, int lm)
{
	int i;
	int pair_lm;

	pair_lm = lm_right_pair[lm];
	if (pair_lm < 0)
		return -EINVAL;

	for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
		struct mdp5_hw_mixer *mixer = mdp5_kms->hwmixers[i];

		if (mixer->lm == pair_lm)
			return mixer->idx;
	}

	return -1;
}

int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
		      uint32_t caps, struct mdp5_hw_mixer **mixer,
		      struct mdp5_hw_mixer **r_mixer)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct mdp5_state *state = mdp5_get_state(s);
	struct mdp5_hw_mixer_state *new_state;
	int i;

	if (IS_ERR(state))
		return PTR_ERR(state);

	new_state = &state->hwmixer;

	for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
		struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i];

		/*
		 * skip if already in-use by a different CRTC. If there is a
		 * mixer already assigned to this CRTC, it means this call is
		 * a request to get an additional right mixer. Assume that the
		 * existing mixer is the 'left' one, and try to see if we can
		 * get its corresponding 'right' pair.
		 */
		if (new_state->hwmixer_to_crtc[cur->idx] &&
		    new_state->hwmixer_to_crtc[cur->idx] != crtc)
			continue;

		/* skip if doesn't support some required caps: */
		if (caps & ~cur->caps)
			continue;

		if (r_mixer) {
			int pair_idx;

			pair_idx = get_right_pair_idx(mdp5_kms, cur->lm);
			if (pair_idx < 0)
				return -EINVAL;

			if (new_state->hwmixer_to_crtc[pair_idx])
				continue;

			*r_mixer = mdp5_kms->hwmixers[pair_idx];
		}

		/*
		 * prefer a pair-able LM over an unpairable one. We can
		 * switch the CRTC from Normal mode to Source Split mode
		 * without requiring a full modeset if we had already
		 * assigned this CRTC a pair-able LM.
		 *
		 * TODO: There will be assignment sequences which would
		 * result in the CRTC requiring a full modeset, even
		 * if we have the LM resources to prevent it. For a platform
		 * with a few displays, we don't run out of pair-able LMs
		 * so easily. For now, ignore the possibility of requiring
		 * a full modeset.
		 */
		if (!(*mixer) || cur->caps & MDP_LM_CAP_PAIR)
			*mixer = cur;
	}

	if (!(*mixer))
		return -ENOMEM;

	if (r_mixer && !(*r_mixer))
		return -ENOMEM;

	DBG("assigning Layer Mixer %d to crtc %s", (*mixer)->lm, crtc->name);

	new_state->hwmixer_to_crtc[(*mixer)->idx] = crtc;
	if (r_mixer) {
		DBG("assigning Right Layer Mixer %d to crtc %s", (*r_mixer)->lm,
		    crtc->name);
		new_state->hwmixer_to_crtc[(*r_mixer)->idx] = crtc;
	}

	return 0;
}

void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
{
	struct mdp5_state *state = mdp5_get_state(s);
	struct mdp5_hw_mixer_state *new_state = &state->hwmixer;

	if (!mixer)
		return;

	if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx]))
		return;

	DBG("%s: release from crtc %s", mixer->name,
	    new_state->hwmixer_to_crtc[mixer->idx]->name);

	new_state->hwmixer_to_crtc[mixer->idx] = NULL;
}

void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer)
{
	kfree(mixer);
}

static const char * const mixer_names[] = {
	"LM0", "LM1", "LM2", "LM3", "LM4", "LM5",
};

struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm)
{
	struct mdp5_hw_mixer *mixer;

	mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
	if (!mixer)
		return ERR_PTR(-ENOMEM);

	mixer->name = mixer_names[lm->id];
	mixer->lm = lm->id;
	mixer->caps = lm->caps;
	mixer->pp = lm->pp;
	mixer->dspp = lm->dspp;
	mixer->flush_mask = mdp_ctl_flush_mask_lm(lm->id);

	return mixer;
}
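A sketch of how a CRTC's atomic check might use mdp5_mixer_assign() when it wants source split: both out-pointers must start out NULL, and passing a non-NULL r_mixer turns the call into a request for a pairable left mixer plus its free right partner. The surrounding code is hypothetical; the caps flags come from the same patch series:

/* Hypothetical atomic-check excerpt. */
struct mdp5_hw_mixer *mixer = NULL;
struct mdp5_hw_mixer *r_mixer = NULL;
uint32_t caps = MDP_LM_CAP_DISPLAY | MDP_LM_CAP_PAIR;
int ret;

ret = mdp5_mixer_assign(state, crtc, caps, &mixer, &r_mixer);
if (ret)
	return ret;	/* no free pairable LM pair: fail the check */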
drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h (new file, 47 lines)
@ -0,0 +1,47 @@
/*
 * Copyright (C) 2017 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __MDP5_LM_H__
#define __MDP5_LM_H__

/* represents a hw Layer Mixer, one (or more) is dynamically assigned to a crtc */
struct mdp5_hw_mixer {
	int idx;

	const char *name;

	int lm;			/* the LM instance # */
	uint32_t caps;
	int pp;
	int dspp;

	uint32_t flush_mask;	/* used to commit LM registers */
};

/* global atomic state of assignment between CRTCs and Layer Mixers: */
struct mdp5_hw_mixer_state {
	struct drm_crtc *hwmixer_to_crtc[8];
};

struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm);
void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm);
int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
		      uint32_t caps, struct mdp5_hw_mixer **mixer,
		      struct mdp5_hw_mixer **r_mixer);
void mdp5_mixer_release(struct drm_atomic_state *s,
			struct mdp5_hw_mixer *mixer);

#endif /* __MDP5_LM_H__ */
@ -135,7 +135,5 @@ struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
	hwpipe->caps = caps;
	hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe);

	spin_lock_init(&hwpipe->pipe_lock);

	return hwpipe;
}
@ -28,7 +28,6 @@ struct mdp5_hw_pipe {
	const char *name;
	enum mdp5_pipe pipe;

	spinlock_t pipe_lock;	/* protect REG_MDP5_PIPE_* registers */
	uint32_t reg_offset;
	uint32_t caps;
@ -22,6 +22,8 @@
struct mdp5_plane {
	struct drm_plane base;

	spinlock_t pipe_lock;	/* protect REG_MDP5_PIPE_* registers */

	uint32_t nformats;
	uint32_t formats[32];
};
@ -40,9 +42,6 @@ static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
		uint32_t src_w, uint32_t src_h,
		struct drm_modeset_acquire_ctx *ctx);

static void set_scanout_locked(struct drm_plane *plane,
		struct drm_framebuffer *fb);

static struct mdp5_kms *get_kms(struct drm_plane *plane)
{
	struct msm_drm_private *priv = plane->dev->dev_private;
@ -178,9 +177,14 @@ mdp5_plane_atomic_print_state(struct drm_printer *p,
		const struct drm_plane_state *state)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
	struct mdp5_kms *mdp5_kms = get_kms(state->plane);

	drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
			pstate->hwpipe->name : "(null)");
	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
		drm_printf(p, "\tright-hwpipe=%s\n",
			   pstate->r_hwpipe ? pstate->r_hwpipe->name :
					      "(null)");
	drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied);
	drm_printf(p, "\tzpos=%u\n", pstate->zpos);
	drm_printf(p, "\talpha=%u\n", pstate->alpha);
@ -300,7 +304,9 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
	struct drm_plane_state *old_state = plane->state;
	struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
	bool new_hwpipe = false;
	bool need_right_hwpipe = false;
	uint32_t max_width, max_height;
	bool out_of_bounds = false;
	uint32_t caps = 0;
	struct drm_rect clip;
	int min_scale, max_scale;
@ -313,7 +319,23 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
	max_height = config->hw->lm.max_height << 16;

	/* Make sure source dimensions are within bounds. */
	if ((state->src_w > max_width) || (state->src_h > max_height)) {
	if (state->src_h > max_height)
		out_of_bounds = true;

	if (state->src_w > max_width) {
		/* If source split is supported, we can go up to 2x
		 * the max LM width, but we'd need to stage another
		 * hwpipe to the right LM. So, the drm_plane would
		 * consist of 2 hwpipes.
		 */
		if (config->hw->mdp.caps & MDP_CAP_SRC_SPLIT &&
		    (state->src_w <= 2 * max_width))
			need_right_hwpipe = true;
		else
			out_of_bounds = true;
	}

	if (out_of_bounds) {
		struct drm_rect src = drm_plane_state_src(state);
		DBG("Invalid source size "DRM_RECT_FP_FMT,
				DRM_RECT_FP_ARG(&src));
@ -364,6 +386,15 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
		if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
			new_hwpipe = true;

		/*
		 * (re)allocte hw pipe if we're either requesting for 2 hw pipes
		 * or we're switching from 2 hw pipes to 1 hw pipe because the
		 * new src_w can be supported by 1 hw pipe itself.
		 */
		if ((need_right_hwpipe && !mdp5_state->r_hwpipe) ||
		    (!need_right_hwpipe && mdp5_state->r_hwpipe))
			new_hwpipe = true;

		if (mdp5_kms->smp) {
			const struct mdp_format *format =
				to_mdp_format(msm_framebuffer_format(state->fb));
@ -382,13 +413,36 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
			 * it available for other planes?
			 */
			struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
			struct mdp5_hw_pipe *old_right_hwpipe =
							  mdp5_state->r_hwpipe;

			mdp5_state->hwpipe = mdp5_pipe_assign(state->state,
					plane, caps, blkcfg);
			if (IS_ERR(mdp5_state->hwpipe)) {
				DBG("%s: failed to assign hwpipe!", plane->name);
				return PTR_ERR(mdp5_state->hwpipe);
			}

			if (need_right_hwpipe) {
				mdp5_state->r_hwpipe =
					mdp5_pipe_assign(state->state, plane,
							 caps, blkcfg);
				if (IS_ERR(mdp5_state->r_hwpipe)) {
					DBG("%s: failed to assign right hwpipe",
					    plane->name);
					return PTR_ERR(mdp5_state->r_hwpipe);
				}
			} else {
				/*
				 * set it to NULL so that the driver knows we
				 * don't have a right hwpipe when committing a
				 * new state
				 */
				mdp5_state->r_hwpipe = NULL;
			}

			mdp5_pipe_release(state->state, old_hwpipe);
			mdp5_pipe_release(state->state, old_right_hwpipe);
		}
	}
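This width check is the crux of the 4k support called out in the merge log: a plane wider than one LM is still accepted when SRC_SPLIT is available and the source fits in two LMs. With an illustrative max LM width of 2560, a 3840-wide source fails the single-pipe test, passes the 2x test, and is given a second 'right' hwpipe; anything over 5120 is rejected. A stand-alone restatement of the bound, in whole pixels rather than the driver's Q16 values:

static bool toy_src_w_ok(unsigned src_w, unsigned max_lm_w,
			 bool has_src_split, bool *needs_right_pipe)
{
	*needs_right_pipe = false;
	if (src_w <= max_lm_w)
		return true;			/* one hwpipe suffices */
	if (has_src_split && src_w <= 2 * max_lm_w) {
		*needs_right_pipe = true;	/* gang a second hwpipe */
		return true;
	}
	return false;				/* out of bounds */
}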
@ -437,13 +491,10 @@ static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
|
||||
.atomic_update = mdp5_plane_atomic_update,
|
||||
};
|
||||
|
||||
static void set_scanout_locked(struct drm_plane *plane,
|
||||
struct drm_framebuffer *fb)
|
||||
static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
|
||||
enum mdp5_pipe pipe,
|
||||
struct drm_framebuffer *fb)
|
||||
{
|
||||
struct mdp5_kms *mdp5_kms = get_kms(plane);
|
||||
struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(plane->state)->hwpipe;
|
||||
enum mdp5_pipe pipe = hwpipe->pipe;
|
||||
|
||||
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
|
||||
MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
|
||||
MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
|
||||
@ -460,8 +511,6 @@ static void set_scanout_locked(struct drm_plane *plane,
|
||||
msm_framebuffer_iova(fb, mdp5_kms->id, 2));
|
||||
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
|
||||
msm_framebuffer_iova(fb, mdp5_kms->id, 3));
|
||||
|
||||
plane->fb = fb;
|
||||
}
|
||||
|
||||
/* Note: mdp5_plane->pipe_lock must be locked */
|
||||
@ -714,95 +763,39 @@ static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
|
||||
}
|
||||
}
|
||||
|
||||
struct pixel_ext {
|
||||
int left[COMP_MAX];
|
||||
int right[COMP_MAX];
|
||||
int top[COMP_MAX];
|
||||
int bottom[COMP_MAX];
|
||||
};
|
||||
|
||||
static int mdp5_plane_mode_set(struct drm_plane *plane,
|
||||
struct drm_crtc *crtc, struct drm_framebuffer *fb,
|
||||
struct drm_rect *src, struct drm_rect *dest)
|
||||
struct phase_step {
|
||||
u32 x[COMP_MAX];
|
||||
u32 y[COMP_MAX];
|
||||
};
|
||||
|
||||
static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms,
|
||||
struct mdp5_hw_pipe *hwpipe,
|
||||
struct drm_framebuffer *fb,
|
||||
struct phase_step *step,
|
||||
struct pixel_ext *pe,
|
||||
u32 scale_config, u32 hdecm, u32 vdecm,
|
||||
bool hflip, bool vflip,
|
||||
int crtc_x, int crtc_y,
|
||||
unsigned int crtc_w, unsigned int crtc_h,
|
||||
u32 src_img_w, u32 src_img_h,
|
||||
u32 src_x, u32 src_y,
|
||||
u32 src_w, u32 src_h)
|
||||
{
|
||||
struct drm_plane_state *pstate = plane->state;
|
||||
struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
|
||||
struct mdp5_kms *mdp5_kms = get_kms(plane);
|
||||
enum mdp5_pipe pipe = hwpipe->pipe;
|
||||
const struct mdp_format *format;
|
||||
uint32_t nplanes, config = 0;
uint32_t phasex_step[COMP_MAX] = {0,}, phasey_step[COMP_MAX] = {0,};
bool pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
int pe_left[COMP_MAX], pe_right[COMP_MAX];
int pe_top[COMP_MAX], pe_bottom[COMP_MAX];
uint32_t hdecm = 0, vdecm = 0;
uint32_t pix_format;
unsigned int rotation;
bool vflip, hflip;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
uint32_t src_w, src_h;
unsigned long flags;
int ret;

nplanes = fb->format->num_planes;

/* bad formats should already be rejected: */
if (WARN_ON(nplanes > pipe2nclients(pipe)))
return -EINVAL;

format = to_mdp_format(msm_framebuffer_format(fb));
pix_format = format->base.pixel_format;

src_x = src->x1;
src_y = src->y1;
src_w = drm_rect_width(src);
src_h = drm_rect_height(src);

crtc_x = dest->x1;
crtc_y = dest->y1;
crtc_w = drm_rect_width(dest);
crtc_h = drm_rect_height(dest);

/* src values are in Q16 fixed point, convert to integer: */
src_x = src_x >> 16;
src_y = src_y >> 16;
src_w = src_w >> 16;
src_h = src_h >> 16;

DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name,
fb->base.id, src_x, src_y, src_w, src_h,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);

ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step);
if (ret)
return ret;

ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, phasey_step);
if (ret)
return ret;

if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
calc_pixel_ext(format, src_w, crtc_w, phasex_step,
pe_left, pe_right, true);
calc_pixel_ext(format, src_h, crtc_h, phasey_step,
pe_top, pe_bottom, false);
}

/* TODO calc hdecm, vdecm */

/* SCALE is used to both scale and up-sample chroma components */
config |= get_scale_config(format, src_w, crtc_w, true);
config |= get_scale_config(format, src_h, crtc_h, false);
DBG("scale config = %x", config);

rotation = drm_rotation_simplify(pstate->rotation,
DRM_ROTATE_0 |
DRM_REFLECT_X |
DRM_REFLECT_Y);
hflip = !!(rotation & DRM_REFLECT_X);
vflip = !!(rotation & DRM_REFLECT_Y);

spin_lock_irqsave(&hwpipe->pipe_lock, flags);
bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
const struct mdp_format *format =
to_mdp_format(msm_framebuffer_format(fb));

mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
MDP5_PIPE_SRC_IMG_SIZE_WIDTH(min(fb->width, src_w)) |
MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(min(fb->height, src_h)));
MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) |
MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_img_h));

mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
@ -841,7 +834,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
(hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
(vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
COND(pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
COND(has_pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));

/* not using secure mode: */
@ -849,22 +842,23 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,

if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
mdp5_write_pixel_ext(mdp5_kms, pipe, format,
src_w, pe_left, pe_right,
src_h, pe_top, pe_bottom);
src_w, pe->left, pe->right,
src_h, pe->top, pe->bottom);

if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
phasex_step[COMP_0]);
step->x[COMP_0]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
phasey_step[COMP_0]);
step->y[COMP_0]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
phasex_step[COMP_1_2]);
step->x[COMP_1_2]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
phasey_step[COMP_1_2]);
step->y[COMP_1_2]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
MDP5_PIPE_DECIMATION_VERT(vdecm) |
MDP5_PIPE_DECIMATION_HORZ(hdecm));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
scale_config);
}

if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
@ -875,9 +869,130 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
csc_disable(mdp5_kms, pipe);
}

set_scanout_locked(plane, fb);
set_scanout_locked(mdp5_kms, pipe, fb);
}

spin_unlock_irqrestore(&hwpipe->pipe_lock, flags);
static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_rect *src, struct drm_rect *dest)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct drm_plane_state *pstate = plane->state;
struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = hwpipe->pipe;
struct mdp5_hw_pipe *right_hwpipe;
const struct mdp_format *format;
uint32_t nplanes, config = 0;
struct phase_step step = { 0 };
struct pixel_ext pe = { 0 };
uint32_t hdecm = 0, vdecm = 0;
uint32_t pix_format;
unsigned int rotation;
bool vflip, hflip;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
uint32_t src_w, src_h;
uint32_t src_img_w, src_img_h;
uint32_t src_x_r;
int crtc_x_r;
unsigned long flags;
int ret;

nplanes = fb->format->num_planes;

/* bad formats should already be rejected: */
if (WARN_ON(nplanes > pipe2nclients(pipe)))
return -EINVAL;

format = to_mdp_format(msm_framebuffer_format(fb));
pix_format = format->base.pixel_format;

src_x = src->x1;
src_y = src->y1;
src_w = drm_rect_width(src);
src_h = drm_rect_height(src);

crtc_x = dest->x1;
crtc_y = dest->y1;
crtc_w = drm_rect_width(dest);
crtc_h = drm_rect_height(dest);

/* src values are in Q16 fixed point, convert to integer: */
src_x = src_x >> 16;
src_y = src_y >> 16;
src_w = src_w >> 16;
src_h = src_h >> 16;

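As an aside on the Q16 convention used above: the integer part lives in the upper 16 bits and the fraction in the lower 16, so the right-shift simply truncates. A standalone sketch (plain userspace C, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t q16 = (100u << 16) | 0x8000u;	/* 100.5 in Q16 */

	/* integer part: 100 (the fraction is simply truncated) */
	printf("int part: %u\n", q16 >> 16);
	/* fractional part in thousandths: 500 */
	printf("frac: %u\n", ((q16 & 0xffffu) * 1000u) >> 16);
	return 0;
}
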
src_img_w = min(fb->width, src_w);
src_img_h = min(fb->height, src_h);

DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name,
fb->base.id, src_x, src_y, src_w, src_h,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);

right_hwpipe = to_mdp5_plane_state(pstate)->r_hwpipe;
if (right_hwpipe) {
/*
* if the plane comprises 2 hw pipes, assume that the width
* is split equally across them. The only parameters that vary
* between the 2 pipes are src_x and crtc_x
*/
crtc_w /= 2;
src_w /= 2;
src_img_w /= 2;

crtc_x_r = crtc_x + crtc_w;
src_x_r = src_x + src_w;
}

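To make the halving concrete, a standalone sketch with hypothetical values for a 4096-pixel-wide plane split across two hwpipes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical 4k plane at crtc_x = 0, src_x = 0 */
	uint32_t crtc_x = 0, src_x = 0;
	uint32_t crtc_w = 4096, src_w = 4096;

	/* split equally, as mdp5_plane_mode_set() does */
	crtc_w /= 2;
	src_w /= 2;

	uint32_t crtc_x_r = crtc_x + crtc_w;	/* 2048 */
	uint32_t src_x_r = src_x + src_w;	/* 2048 */

	/* left pipe scans out [0, 2048), right pipe [2048, 4096) */
	printf("left: crtc_x=%u w=%u, right: crtc_x=%u w=%u\n",
	       crtc_x, crtc_w, crtc_x_r, crtc_w);
	return 0;
}
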
ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x);
if (ret)
return ret;

ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, step.y);
if (ret)
return ret;

if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
calc_pixel_ext(format, src_w, crtc_w, step.x,
pe.left, pe.right, true);
calc_pixel_ext(format, src_h, crtc_h, step.y,
pe.top, pe.bottom, false);
}

/* TODO calc hdecm, vdecm */

/* SCALE is used to both scale and up-sample chroma components */
config |= get_scale_config(format, src_w, crtc_w, true);
config |= get_scale_config(format, src_h, crtc_h, false);
DBG("scale config = %x", config);

rotation = drm_rotation_simplify(pstate->rotation,
DRM_ROTATE_0 |
DRM_REFLECT_X |
DRM_REFLECT_Y);
hflip = !!(rotation & DRM_REFLECT_X);
vflip = !!(rotation & DRM_REFLECT_Y);

spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);

mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe,
config, hdecm, vdecm, hflip, vflip,
crtc_x, crtc_y, crtc_w, crtc_h,
src_img_w, src_img_h,
src_x, src_y, src_w, src_h);
if (right_hwpipe)
mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe,
config, hdecm, vdecm, hflip, vflip,
crtc_x_r, crtc_y, crtc_w, crtc_h,
src_img_w, src_img_h,
src_x_r, src_y, src_w, src_h);

spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);

plane->fb = fb;

return ret;
}
@ -934,6 +1049,7 @@ static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,

if (new_plane_state->visible) {
struct mdp5_ctl *ctl;
struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(crtc);

ret = mdp5_plane_mode_set(plane, crtc, fb,
&new_plane_state->src,
@ -942,7 +1058,7 @@ static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,

ctl = mdp5_crtc_get_ctl(crtc);

mdp5_ctl_commit(ctl, mdp5_plane_get_flush(plane));
mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane));
}

*to_mdp5_plane_state(plane_state) =
@ -959,6 +1075,10 @@ slow:
src_x, src_y, src_w, src_h, ctx);
}

/*
* Use this func and the one below only after the atomic state has been
* successfully swapped
*/
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
{
struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
@ -969,14 +1089,30 @@ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
return pstate->hwpipe->pipe;
}

enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane)
{
struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);

if (!pstate->r_hwpipe)
return SSPP_NONE;

return pstate->r_hwpipe->pipe;
}

uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
{
struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
u32 mask;

if (WARN_ON(!pstate->hwpipe))
return 0;

return pstate->hwpipe->flush_mask;
mask = pstate->hwpipe->flush_mask;

if (pstate->r_hwpipe)
mask |= pstate->r_hwpipe->flush_mask;

return mask;
}

/* initialize plane */
@ -998,6 +1134,8 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
ARRAY_SIZE(mdp5_plane->formats), false);

spin_lock_init(&mdp5_plane->pipe_lock);

if (type == DRM_PLANE_TYPE_CURSOR)
ret = drm_universal_plane_init(dev, plane, 0xff,
&mdp5_cursor_plane_funcs,
|
@ -104,6 +104,7 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
#define MDP_CAP_DSC BIT(1) /* VESA Display Stream Compression */
#define MDP_CAP_CDM BIT(2) /* Chroma Down Module (HDMI 2.0 YUV) */
#define MDP_CAP_SRC_SPLIT BIT(3) /* Source Split of SSPPs */

/* MDP pipe capabilities */
#define MDP_PIPE_CAP_HFLIP BIT(0)
@ -114,6 +115,11 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
#define MDP_PIPE_CAP_SW_PIX_EXT BIT(5)
#define MDP_PIPE_CAP_CURSOR BIT(6)

/* MDP layer mixer caps */
#define MDP_LM_CAP_DISPLAY BIT(0)
#define MDP_LM_CAP_WB BIT(1)
#define MDP_LM_CAP_PAIR BIT(2)

static inline bool pipe_supports_yuv(uint32_t pipe_caps)
{
return (pipe_caps & MDP_PIPE_CAP_SCALE) &&
|
@ -28,7 +28,9 @@ static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)

if (gpu) {
seq_printf(m, "%s Status:\n", gpu->name);
pm_runtime_get_sync(&gpu->pdev->dev);
gpu->funcs->show(gpu, m);
pm_runtime_put_sync(&gpu->pdev->dev);
}

return 0;
|
@ -55,14 +55,13 @@ int msm_register_address_space(struct drm_device *dev,
struct msm_gem_address_space *aspace)
{
struct msm_drm_private *priv = dev->dev_private;
int idx = priv->num_aspaces++;

if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace)))
if (WARN_ON(priv->num_aspaces >= ARRAY_SIZE(priv->aspace)))
return -EINVAL;

priv->aspace[idx] = aspace;
priv->aspace[priv->num_aspaces] = aspace;

return idx;
return priv->num_aspaces++;
}
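The rewrite of msm_register_address_space() above also fixes a subtle accounting bug: the old code bumped num_aspaces before the bounds check, so a rejected registration still consumed a slot count. A standalone sketch contrasting the two patterns:

#include <stdio.h>

#define MAX_ASPACES 4

static int num_aspaces;

/* old pattern: the counter is bumped even when we bail out */
static int register_old(void)
{
	int idx = num_aspaces++;

	if (idx >= MAX_ASPACES)
		return -1;	/* num_aspaces is now past the array */
	return idx;
}

/* new pattern: check first, then commit the increment */
static int register_new(void)
{
	if (num_aspaces >= MAX_ASPACES)
		return -1;	/* counter untouched on failure */
	return num_aspaces++;
}

int main(void)
{
	num_aspaces = MAX_ASPACES;	/* array already full */
	printf("old: %d (count now %d)\n", register_old(), num_aspaces);
	num_aspaces = MAX_ASPACES;
	printf("new: %d (count still %d)\n", register_new(), num_aspaces);
	return 0;
}
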

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
@ -265,6 +264,8 @@ static int msm_drm_uninit(struct device *dev)

if (gpu) {
mutex_lock(&ddev->struct_mutex);
// XXX what do we do here?
//pm_runtime_enable(&pdev->dev);
gpu->funcs->pm_suspend(gpu);
mutex_unlock(&ddev->struct_mutex);
gpu->funcs->destroy(gpu);
@ -539,7 +540,7 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
return 0;
}

static void msm_preclose(struct drm_device *dev, struct drm_file *file)
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx = file->driver_priv;
@ -812,7 +813,7 @@ static struct drm_driver msm_driver = {
DRIVER_ATOMIC |
DRIVER_MODESET,
.open = msm_open,
.preclose = msm_preclose,
.postclose = msm_postclose,
.lastclose = msm_lastclose,
.irq_handler = msm_irq,
.irq_preinstall = msm_irq_preinstall,
|
@ -191,7 +191,8 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt, int npages);

void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace);
void msm_gem_address_space_put(struct msm_gem_address_space *aspace);

struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name);
|
@ -18,6 +18,7 @@
#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__

#include <linux/kref.h>
#include <linux/reservation.h>
#include "msm_drv.h"

@ -31,6 +32,7 @@ struct msm_gem_address_space {
*/
struct drm_mm mm;
struct msm_mmu *mmu;
struct kref kref;
};

struct msm_gem_vma {
|
@ -404,6 +404,24 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
return -EINVAL;

if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
in_fence = sync_file_get_fence(args->fence_fd);

if (!in_fence)
return -EINVAL;

/* TODO if we get an array-fence due to userspace merging multiple
* fences, we need a way to determine if all the backing fences
* are from our own context..
*/

if (in_fence->context != gpu->fctx->context) {
ret = dma_fence_wait(in_fence, true);
if (ret)
return ret;
}
}

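The point of moving the FENCE_FD_IN block above is ordering: the potentially long dma_fence_wait() now happens before struct_mutex is taken instead of while holding it. The shape of that change, as a standalone sketch (the lock and the wait are stand-ins, not the kernel APIs):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t submit_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for dma_fence_wait(): may sleep for a long time */
static int wait_for_fence(void)
{
	usleep(1000);
	return 0;
}

int main(void)
{
	/* wait first, so other submitters are not blocked on the lock */
	if (wait_for_fence())
		return 1;

	pthread_mutex_lock(&submit_lock);
	/* ... build and queue the submit ... */
	pthread_mutex_unlock(&submit_lock);
	puts("submitted");
	return 0;
}
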
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
@ -431,27 +449,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;

if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
in_fence = sync_file_get_fence(args->fence_fd);

if (!in_fence) {
ret = -EINVAL;
goto out;
}

/* TODO if we get an array-fence due to userspace merging multiple
* fences, we need a way to determine if all the backing fences
* are from our own context..
*/

if (in_fence->context != gpu->fctx->context) {
ret = dma_fence_wait(in_fence, true);
if (ret)
goto out;
}

}

if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
ret = submit_fence_sync(submit);
if (ret)
|
@ -19,6 +19,25 @@
#include "msm_gem.h"
#include "msm_mmu.h"

static void
msm_gem_address_space_destroy(struct kref *kref)
{
struct msm_gem_address_space *aspace = container_of(kref,
struct msm_gem_address_space, kref);

drm_mm_takedown(&aspace->mm);
if (aspace->mmu)
aspace->mmu->funcs->destroy(aspace->mmu);
kfree(aspace);
}

void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
if (aspace)
kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

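Address spaces are now reference counted; the last kref_put() reaches the private destructor through container_of(). A standalone userspace re-creation of the pattern (non-atomic refcount, for illustration only):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kref { int refcount; };

struct aspace {
	const char *name;
	struct kref kref;
};

static void aspace_destroy(struct kref *kref)
{
	struct aspace *a = container_of(kref, struct aspace, kref);

	printf("destroying %s\n", a->name);
	free(a);
}

static void kref_put(struct kref *kref, void (*release)(struct kref *))
{
	if (--kref->refcount == 0)
		release(kref);
}

int main(void)
{
	struct aspace *a = malloc(sizeof(*a));

	a->name = "gpu";
	a->kref.refcount = 2;	/* e.g. creator + one mapped vma */

	kref_put(&a->kref, aspace_destroy);	/* vma unmapped: no-op */
	kref_put(&a->kref, aspace_destroy);	/* last ref: destroyed */
	return 0;
}
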
void
msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt)
@ -34,6 +53,8 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
drm_mm_remove_node(&vma->node);

vma->iova = 0;

msm_gem_address_space_put(aspace);
}

int
@ -57,16 +78,10 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
size, IOMMU_READ | IOMMU_WRITE);
}

return ret;
}
/* Get a reference to the aspace to keep it around */
kref_get(&aspace->kref);

void
msm_gem_address_space_destroy(struct msm_gem_address_space *aspace)
{
drm_mm_takedown(&aspace->mm);
if (aspace->mmu)
aspace->mmu->funcs->destroy(aspace->mmu);
kfree(aspace);
return ret;
}

struct msm_gem_address_space *
@ -85,5 +100,7 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
(domain->geometry.aperture_end >> PAGE_SHIFT) - 1);

kref_init(&aspace->kref);

return aspace;
}
|
@ -93,18 +93,18 @@ static int enable_clk(struct msm_gpu *gpu)
{
int i;

if (gpu->grp_clks[0] && gpu->fast_rate)
clk_set_rate(gpu->grp_clks[0], gpu->fast_rate);
if (gpu->core_clk && gpu->fast_rate)
clk_set_rate(gpu->core_clk, gpu->fast_rate);

/* Set the RBBM timer rate to 19.2MHz */
if (gpu->grp_clks[2])
clk_set_rate(gpu->grp_clks[2], 19200000);
if (gpu->rbbmtimer_clk)
clk_set_rate(gpu->rbbmtimer_clk, 19200000);

for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_prepare(gpu->grp_clks[i]);

for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_enable(gpu->grp_clks[i]);

@ -115,19 +115,24 @@ static int disable_clk(struct msm_gpu *gpu)
{
int i;

for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_disable(gpu->grp_clks[i]);

for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_unprepare(gpu->grp_clks[i]);

if (gpu->grp_clks[0] && gpu->slow_rate)
clk_set_rate(gpu->grp_clks[0], gpu->slow_rate);
/*
* Set the clock to a deliberately low rate. On older targets the clock
* speed had to be non-zero to avoid problems. On newer targets this
* will be rounded down to zero anyway so it all works out.
*/
if (gpu->core_clk)
clk_set_rate(gpu->core_clk, 27000000);

if (gpu->grp_clks[2])
clk_set_rate(gpu->grp_clks[2], 0);
if (gpu->rbbmtimer_clk)
clk_set_rate(gpu->rbbmtimer_clk, 0);

return 0;
}
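enable_clk() walks the clock array twice on purpose: in the common clock framework clk_prepare() may sleep while clk_enable() must not, and teardown mirrors the order (disable, then unprepare). A standalone sketch of that pairing (the clk_* functions here are stubs, not the kernel API):

#include <stdio.h>

static void clk_prepare(const char *c)   { printf("prepare %s\n", c); }
static void clk_enable(const char *c)    { printf("enable %s\n", c); }
static void clk_disable(const char *c)   { printf("disable %s\n", c); }
static void clk_unprepare(const char *c) { printf("unprepare %s\n", c); }

int main(void)
{
	const char *clks[] = { "core", "iface", "rbbmtimer" };
	int n = 3, i;

	/* power up: prepare everything first, then enable */
	for (i = n - 1; i >= 0; i--)
		clk_prepare(clks[i]);
	for (i = n - 1; i >= 0; i--)
		clk_enable(clks[i]);

	/* power down: mirror image, disable before unprepare */
	for (i = n - 1; i >= 0; i--)
		clk_disable(clks[i]);
	for (i = n - 1; i >= 0; i--)
		clk_unprepare(clks[i]);
	return 0;
}
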
@ -152,18 +157,9 @@ static int disable_axi(struct msm_gpu *gpu)

int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
int ret;

DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);

WARN_ON(!mutex_is_locked(&dev->struct_mutex));

if (gpu->active_cnt++ > 0)
return 0;

if (WARN_ON(gpu->active_cnt <= 0))
return -EINVAL;
DBG("%s", gpu->name);

ret = enable_pwrrail(gpu);
if (ret)
@ -177,23 +173,16 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;

gpu->needs_hw_init = true;

return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
int ret;

DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);

WARN_ON(!mutex_is_locked(&dev->struct_mutex));

if (--gpu->active_cnt > 0)
return 0;

if (WARN_ON(gpu->active_cnt < 0))
return -EINVAL;
DBG("%s", gpu->name);

ret = disable_axi(gpu);
if (ret)
@ -210,53 +199,20 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
return 0;
}

/*
* Inactivity detection (for suspend):
*/

static void inactive_worker(struct work_struct *work)
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
struct drm_device *dev = gpu->dev;
int ret;

if (gpu->inactive)
return;
if (!gpu->needs_hw_init)
return 0;

DBG("%s: inactive!\n", gpu->name);
mutex_lock(&dev->struct_mutex);
if (!(msm_gpu_active(gpu) || gpu->inactive)) {
disable_axi(gpu);
disable_clk(gpu);
gpu->inactive = true;
}
mutex_unlock(&dev->struct_mutex);
}
disable_irq(gpu->irq);
ret = gpu->funcs->hw_init(gpu);
if (!ret)
gpu->needs_hw_init = false;
enable_irq(gpu->irq);

static void inactive_handler(unsigned long data)
{
struct msm_gpu *gpu = (struct msm_gpu *)data;
struct msm_drm_private *priv = gpu->dev->dev_private;

queue_work(priv->wq, &gpu->inactive_work);
}

/* cancel inactive timer and make sure we are awake: */
static void inactive_cancel(struct msm_gpu *gpu)
{
DBG("%s", gpu->name);
del_timer(&gpu->inactive_timer);
if (gpu->inactive) {
enable_clk(gpu);
enable_axi(gpu);
gpu->inactive = false;
}
}

static void inactive_start(struct msm_gpu *gpu)
{
DBG("%s", gpu->name);
mod_timer(&gpu->inactive_timer,
round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
return ret;
}
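With runtime PM in charge of power, hardware init becomes lazy: resume only marks needs_hw_init, and the first submit afterwards re-initializes exactly once. The flag pattern as a standalone sketch:

#include <stdbool.h>
#include <stdio.h>

static bool needs_hw_init = true;

static int hw_init(void)
{
	puts("reprogramming hardware");
	return 0;
}

/* cheap to call on every submit: re-inits only after a resume */
static int gpu_hw_init(void)
{
	int ret;

	if (!needs_hw_init)
		return 0;

	ret = hw_init();
	if (!ret)
		needs_hw_init = false;
	return ret;
}

int main(void)
{
	gpu_hw_init();		/* first submit: init runs */
	gpu_hw_init();		/* subsequent submits: no-op */
	needs_hw_init = true;	/* what pm_resume() would do */
	gpu_hw_init();		/* init runs again */
	return 0;
}
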

/*
@ -296,8 +252,9 @@ static void recover_worker(struct work_struct *work)
/* retire completed submits, plus the one that hung: */
retire_submits(gpu);

inactive_cancel(gpu);
pm_runtime_get_sync(&gpu->pdev->dev);
gpu->funcs->recover(gpu);
pm_runtime_put_sync(&gpu->pdev->dev);

/* replay the remaining submits after the one that hung: */
list_for_each_entry(submit, &gpu->submit_list, node) {
@ -400,6 +357,8 @@ void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
unsigned long flags;

pm_runtime_get_sync(&gpu->pdev->dev);

spin_lock_irqsave(&gpu->perf_lock, flags);
/* we could dynamically enable/disable perfcntr registers too.. */
gpu->last_sample.active = msm_gpu_active(gpu);
@ -413,6 +372,7 @@ void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
gpu->perfcntr_active = false;
pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
@ -458,6 +418,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
drm_gem_object_unreference(&msm_obj->base);
}

pm_runtime_mark_last_busy(&gpu->pdev->dev);
pm_runtime_put_autosuspend(&gpu->pdev->dev);
msm_gem_submit_free(submit);
}
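These hunks replace the hand-rolled inactivity timer with runtime-PM reference counting: each submit takes a reference and each retirement drops one with an autosuspend grace period, so the device powers down only when truly idle. A simplified model of the counting (not the real pm_runtime API):

#include <stdio.h>

static int usage_count;

static void runtime_get(void)
{
	if (usage_count++ == 0)
		puts("resume: clocks on");
}

static void runtime_put_autosuspend(void)
{
	if (--usage_count == 0)
		puts("idle: suspend after the autosuspend delay");
}

int main(void)
{
	runtime_get();			/* submit A */
	runtime_get();			/* submit B overlaps */
	runtime_put_autosuspend();	/* A retired: still busy */
	runtime_put_autosuspend();	/* B retired: goes idle */
	return 0;
}
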

@ -492,9 +454,6 @@ static void retire_worker(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
retire_submits(gpu);
mutex_unlock(&dev->struct_mutex);

if (!msm_gpu_active(gpu))
inactive_start(gpu);
}

/* call from irq handler to schedule work to retire bo's */
@ -515,7 +474,9 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,

WARN_ON(!mutex_is_locked(&dev->struct_mutex));

inactive_cancel(gpu);
pm_runtime_get_sync(&gpu->pdev->dev);

msm_gpu_hw_init(gpu);

list_add_tail(&submit->node, &gpu->submit_list);

@ -559,16 +520,52 @@ static irqreturn_t irq_handler(int irq, void *data)
return gpu->funcs->irq(gpu);
}

static const char *clk_names[] = {
"core", "iface", "rbbmtimer", "mem", "mem_iface", "alt_mem_iface",
};
static struct clk *get_clock(struct device *dev, const char *name)
{
struct clk *clk = devm_clk_get(dev, name);

return IS_ERR(clk) ? NULL : clk;
}

static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
struct device *dev = &pdev->dev;
struct property *prop;
const char *name;
int i = 0;

gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
if (gpu->nr_clocks < 1) {
gpu->nr_clocks = 0;
return 0;
}

gpu->grp_clks = devm_kcalloc(dev, sizeof(struct clk *), gpu->nr_clocks,
GFP_KERNEL);
if (!gpu->grp_clks)
return -ENOMEM;

of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
gpu->grp_clks[i] = get_clock(dev, name);

/* Remember the key clocks that we need to control later */
if (!strcmp(name, "core"))
gpu->core_clk = gpu->grp_clks[i];
else if (!strcmp(name, "rbbmtimer"))
gpu->rbbmtimer_clk = gpu->grp_clks[i];

++i;
}

return 0;
}
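get_clocks() now sizes the clock array from whatever the device tree lists, e.g. clock-names = "core", "iface", "rbbmtimer", instead of a fixed six-entry table, and remembers the two clocks it has to control by name. A standalone sketch of that discovery loop (a string array stands in for the DT property):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* stand-in for the "clock-names" DT property */
	const char *clock_names[] = { "core", "iface", "rbbmtimer" };
	int nr_clocks = 3;
	const char *core_clk = NULL, *rbbmtimer_clk = NULL;

	for (int i = 0; i < nr_clocks; i++) {
		/* remember the key clocks controlled later */
		if (!strcmp(clock_names[i], "core"))
			core_clk = clock_names[i];
		else if (!strcmp(clock_names[i], "rbbmtimer"))
			rbbmtimer_clk = clock_names[i];
	}

	printf("found %d clocks, core=%s rbbmtimer=%s\n",
	       nr_clocks, core_clk, rbbmtimer_clk);
	return 0;
}
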

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, const char *ioname, const char *irqname, int ringsz)
{
struct iommu_domain *iommu;
int i, ret;
int ret;

if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
@ -576,7 +573,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->dev = drm;
gpu->funcs = funcs;
gpu->name = name;
gpu->inactive = true;
gpu->fctx = msm_fence_context_alloc(drm, name);
if (IS_ERR(gpu->fctx)) {
ret = PTR_ERR(gpu->fctx);
@ -586,19 +582,15 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,

INIT_LIST_HEAD(&gpu->active_list);
INIT_WORK(&gpu->retire_work, retire_worker);
INIT_WORK(&gpu->inactive_work, inactive_worker);
INIT_WORK(&gpu->recover_work, recover_worker);

INIT_LIST_HEAD(&gpu->submit_list);

setup_timer(&gpu->inactive_timer, inactive_handler,
(unsigned long)gpu);
setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
(unsigned long)gpu);

spin_lock_init(&gpu->perf_lock);

BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));

/* Map registers: */
gpu->mmio = msm_ioremap(pdev, ioname, name);
@ -622,13 +614,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}

/* Acquire clocks: */
for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
gpu->grp_clks[i] = msm_clk_get(pdev, clk_names[i]);
DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
if (IS_ERR(gpu->grp_clks[i]))
gpu->grp_clks[i] = NULL;
}
ret = get_clocks(pdev, gpu);
if (ret)
goto fail;

gpu->ebi1_clk = msm_clk_get(pdev, "bus");
DBG("ebi1_clk: %p", gpu->ebi1_clk);
@ -684,6 +672,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}

gpu->pdev = pdev;
platform_set_drvdata(pdev, gpu);

bs_init(gpu);

return 0;
|
@ -64,6 +64,7 @@ struct msm_gpu_funcs {
struct msm_gpu {
const char *name;
struct drm_device *dev;
struct platform_device *pdev;
const struct msm_gpu_funcs *funcs;

/* performance counters (hw & sw): */
@ -88,9 +89,8 @@ struct msm_gpu {
/* fencing: */
struct msm_fence_context *fctx;

/* is gpu powered/active? */
int active_cnt;
bool inactive;
/* does gpu need hw_init? */
bool needs_hw_init;

/* worker for handling active-list retiring: */
struct work_struct retire_work;
@ -103,8 +103,10 @@ struct msm_gpu {

/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
struct clk *ebi1_clk, *grp_clks[6];
uint32_t fast_rate, slow_rate, bus_freq;
struct clk **grp_clks;
int nr_clocks;
struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
uint32_t fast_rate, bus_freq;

#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
@ -114,9 +116,7 @@ struct msm_gpu {
/* Hang and Inactivity Detection:
*/
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
#define DRM_MSM_INACTIVE_JIFFIES msecs_to_jiffies(DRM_MSM_INACTIVE_PERIOD)
struct timer_list inactive_timer;
struct work_struct inactive_work;

#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
@ -196,6 +196,8 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);

int msm_gpu_hw_init(struct msm_gpu *gpu);

void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
|
@ -38,78 +38,47 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
return iommu_attach_device(iommu->domain, mmu->dev);
int ret;

pm_runtime_get_sync(mmu->dev);
ret = iommu_attach_device(iommu->domain, mmu->dev);
pm_runtime_put_sync(mmu->dev);

return ret;
}

static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);

pm_runtime_get_sync(mmu->dev);
iommu_detach_device(iommu->domain, mmu->dev);
pm_runtime_put_sync(mmu->dev);
}

static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, unsigned len, int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
unsigned long da = iova;
unsigned int i, j;
int ret;
size_t ret;

if (!domain || !sgt)
return -EINVAL;
// pm_runtime_get_sync(mmu->dev);
ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
// pm_runtime_put_sync(mmu->dev);
WARN_ON(ret < 0);

for_each_sg(sgt->sgl, sg, sgt->nents, i) {
dma_addr_t pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;

VERB("map[%d]: %08lx %08lx(%zx)", i, da, (unsigned long)pa, bytes);

ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
goto fail;

da += bytes;
}

return 0;

fail:
da = iova;

for_each_sg(sgt->sgl, sg, i, j) {
size_t bytes = sg->length + sg->offset;
iommu_unmap(domain, da, bytes);
da += bytes;
}
return ret;
return (ret == len) ? 0 : -EINVAL;
}
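iommu_map_sg() replaces the per-entry mapping loop and its unwind path; it returns the number of bytes actually mapped, so success reduces to "mapped exactly what was asked". The return-value convention in isolation (map_sg here is a stub):

#include <stddef.h>
#include <stdio.h>

/* stand-in for iommu_map_sg(): returns the number of bytes mapped */
static size_t map_sg(size_t len, size_t available)
{
	return len <= available ? len : available;
}

/* success means everything mapped, mirroring (ret == len) ? 0 : -EINVAL */
static int msm_map(size_t len, size_t available)
{
	size_t ret = map_sg(len, available);

	return (ret == len) ? 0 : -22;	/* -EINVAL */
}

int main(void)
{
	printf("full map:    %d\n", msm_map(4096, 8192));
	printf("partial map: %d\n", msm_map(8192, 4096));
	return 0;
}
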

static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, unsigned len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
unsigned long da = iova;
int i;

for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes = sg->length + sg->offset;
size_t unmapped;

unmapped = iommu_unmap(domain, da, bytes);
if (unmapped < bytes)
return unmapped;

VERB("unmap[%d]: %08lx(%zx)", i, da, bytes);

BUG_ON(!PAGE_ALIGNED(bytes));

da += bytes;
}
pm_runtime_get_sync(mmu->dev);
iommu_unmap(iommu->domain, iova, len);
pm_runtime_put_sync(mmu->dev);

return 0;
}
|
@ -322,7 +322,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
}

for (i = 0; i < submit->nr_cmds; i++) {
uint32_t iova = submit->cmd[i].iova;
uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */

/* snapshot cmdstream bo's (if we haven't already): */
@ -341,7 +341,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
case MSM_SUBMIT_CMD_BUF:
rd_write_section(rd, RD_CMDSTREAM_ADDR,
(uint32_t[2]){ iova, szd }, 8);
(uint32_t[3]){ iova, szd, iova >> 32 }, 12);
break;
}
}
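With per-process pagetables on the way, command-stream addresses become 64-bit, so the rd dump record grows from two to three 32-bit words: the low word of the iova, the size, then the high word appended where older parsers ignore it. The split in isolation:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t iova = 0x1000000fff000ULL;	/* hypothetical GPU address */

	/* low 32 bits go where the old 32-bit iova used to be */
	uint32_t lo = (uint32_t)iova;
	/* high 32 bits are appended as a third word */
	uint32_t hi = (uint32_t)(iova >> 32);

	printf("record: { 0x%08" PRIx32 ", size, 0x%08" PRIx32 " }\n", lo, hi);
	return 0;
}
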
|
@ -72,6 +72,7 @@ struct drm_msm_timespec {
#define MSM_PARAM_CHIP_ID 0x03
#define MSM_PARAM_MAX_FREQ 0x04
#define MSM_PARAM_TIMESTAMP 0x05
#define MSM_PARAM_GMEM_BASE 0x06

struct drm_msm_param {
__u32 pipe; /* in, MSM_PIPE_x */
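Userspace picks up the new parameter through the existing GET_PARAM ioctl. A sketch of how a client might query it, assuming the usual libdrm helper drmCommandWriteRead() and an already-open render node (the wrapper function is hypothetical, not from this commit):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "msm_drm.h"

/* hypothetical helper: returns 0 and fills *value on success */
static int get_gmem_base(int fd, uint64_t *value)
{
	struct drm_msm_param req;

	memset(&req, 0, sizeof(req));
	req.pipe = MSM_PIPE_3D0;		/* in, MSM_PIPE_x */
	req.param = MSM_PARAM_GMEM_BASE;	/* the new 0x06 param */

	int ret = drmCommandWriteRead(fd, DRM_MSM_GET_PARAM,
				      &req, sizeof(req));
	if (ret)
		return ret;

	*value = req.value;
	return 0;
}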