drm fixes for 5.15-rc6
core:
- clamp fbdev size
- edid cap blocks read to avoid out of bounds

panel:
- fix missing crc32 dependency

msm:
- Fix a new crash on dev file close if the dev file was opened when
  GPU is not loaded (such as missing fw in initrd)
- Switch to single drm_sched_entity per priority level per drm_file
  to unbreak multi-context userspace
- Serialize GMU access to fix GMU OOB errors
- Various error path fixes
- A couple integer overflow fixes
- Fix mdp5 cursor plane WARNs

i915:
- Fix ACPI object leak
- Fix context leak in user proto-context creation
- Fix missing i915_sw_fence_fini call

hyperv:
- hide hw pointer

nouveau:
- fix engine selection bit

r128:
- fix UML build

rcar-du:
- unconnected LVDS regression fix

mediatek:
- revert CMDQ refinement patches

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmFpDcIACgkQDHTzWXnE
hr4p4A/+KAuKSB1WxV3EuGZWB18Tr04L5cfZW7m/Fy0UKKDnfLoPIlYqyM2Y4aMM
yjG6E7jjOGVK7nKFy9tUOB1DLf8me0tV1ZhWn4/+E7cuhJAC7gBeD1AGnF4jphoI
yZ23k2wXkFWI/6VVeZUnbjhJDeHcgYcUTqzs9IKdjcnSVx50eVsOu67c8z4uHPP6
KZYZjYIe9nuKXcTaiZSLcAyZ3j0kuzCgBBgeOpGNROC/fGoi7BsoFObgVJCUeeSD
1BK/DdpFxJz79SSldayNQc7IXt9Eu3Vic5lq6/EZjYmY/4sBZpORRRZ6Ky28X+1L
GSBWoCC106L2VSZdaAkOurNpqoJox2O7aivHubBigOKXfAmGsjSF6zTY24xsNaCm
TCZ1Y6B+d5kdoxYn0j94oye5v5z9Pi/HhOOja8/ETR6/0/uHmdCXko/s0rOlYDTH
GKzvbi0tKUZOon3mkEXdZWRcDE523q/4x46qfH/wTA5zmgwbo9Xc8HqWTuwOV9tr
N+Huq9oFVhcq8i9RsOQh09vImpen4Dam6dxiCA9GqD8MBH5TL5yarDWW9x3Rq8uX
4YP3J0IqEjl8ytsj1wsrmWYfM4PEbwQLrLW3Ks3SPXa4+mwv8wVTJ2PQkiuzGdCK
yaUAbYsQ6DoEOpfliIft5vL1igMFcdP8pq8fUiwmOwmKp01zFhQ=
=Ey1r
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2021-10-15-1' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "It has a few scattered msm and i915 fixes, a few core fixes and a
  mediatek feature revert.

  I've had to pick a bunch of patches into this, as the drm-misc-fixes
  tree had a bunch of vc4 patches I wasn't comfortable with sending to
  you, at least as part of this; they were delayed due to your reverts.
  If it's really useful as fixes I'll do a separate pull.

  Summary:

  core:
   - clamp fbdev size
   - edid cap blocks read to avoid out of bounds

  panel:
   - fix missing crc32 dependency

  msm:
   - Fix a new crash on dev file close if the dev file was opened when
     GPU is not loaded (such as missing fw in initrd)
   - Switch to single drm_sched_entity per priority level per drm_file
     to unbreak multi-context userspace
   - Serialize GMU access to fix GMU OOB errors
   - Various error path fixes
   - A couple integer overflow fixes
   - Fix mdp5 cursor plane WARNs

  i915:
   - Fix ACPI object leak
   - Fix context leak in user proto-context creation
   - Fix missing i915_sw_fence_fini call

  hyperv:
   - hide hw pointer

  nouveau:
   - fix engine selection bit

  r128:
   - fix UML build

  rcar-du:
   - unconnected LVDS regression fix

  mediatek:
   - revert CMDQ refinement patches"

* tag 'drm-fixes-2021-10-15-1' of git://anongit.freedesktop.org/drm/drm: (34 commits)
  drm/panel: olimex-lcd-olinuxino: select CRC32
  drm/r128: fix build for UML
  drm/nouveau/fifo: Reinstate the correct engine bit programming
  drm/hyperv: Fix double mouse pointers
  drm/fbdev: Clamp fbdev surface size if too large
  drm/edid: In connector_bad_edid() cap num_of_ext by num_blocks read
  drm/i915: Free the returned object of acpi_evaluate_dsm()
  drm/i915: Fix bug in user proto-context creation that leaked contexts
  drm: rcar-du: Don't create encoder for unconnected LVDS outputs
  drm/msm/dsi: fix off by one in dsi_bus_clk_enable error handling
  drm/msm/dsi: Fix an error code in msm_dsi_modeset_init()
  drm/msm/dsi: dsi_phy_14nm: Take ready-bit into account in poll_for_ready
  drm/msm/dsi/phy: fix clock names in 28nm_8960 phy
  drm/msm/dpu: Fix address of SM8150 PINGPONG5 IRQ register
  drm/msm: Do not run snapshot on non-DPU devices
  drm/msm/a3xx: fix error handling in a3xx_gpu_init()
  drm/msm/a4xx: fix error handling in a4xx_gpu_init()
  drm/msm: Fix null pointer dereference on pointer edp
  drm/msm/mdp5: fix cursor-related warnings
  drm/msm: Avoid potential overflow in timeout_to_jiffies()
  ...
commit 591a495d44
@@ -1834,11 +1834,20 @@ static void connector_bad_edid(struct drm_connector *connector,
 			       u8 *edid, int num_blocks)
 {
 	int i;
-	u8 num_of_ext = edid[0x7e];
+	u8 last_block;
+
+	/*
+	 * 0x7e in the EDID is the number of extension blocks. The EDID
+	 * is 1 (base block) + num_ext_blocks big. That means we can think
+	 * of 0x7e in the EDID of the _index_ of the last block in the
+	 * combined chunk of memory.
+	 */
+	last_block = edid[0x7e];
 
 	/* Calculate real checksum for the last edid extension block data */
-	connector->real_edid_checksum =
-		drm_edid_block_checksum(edid + num_of_ext * EDID_LENGTH);
+	if (last_block < num_blocks)
+		connector->real_edid_checksum =
+			drm_edid_block_checksum(edid + last_block * EDID_LENGTH);
 
 	if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
 		return;
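A minimal userspace sketch of the out-of-bounds read closed by the hunk above (hypothetical sizes, stand-alone C, not the kernel code): a corrupt base block can claim more extension blocks than were actually read, so the claimed index must be capped by num_blocks before it is used to address the buffer.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EDID_LENGTH 128

static uint8_t checksum(const uint8_t *block)
{
	uint8_t sum = 0;
	for (int i = 0; i < EDID_LENGTH; i++)
		sum += block[i];
	return sum;
}

int main(void)
{
	/* Only 2 blocks were actually read into the buffer... */
	int num_blocks = 2;
	uint8_t edid[2 * EDID_LENGTH];
	memset(edid, 0, sizeof(edid));

	/* ...but a corrupt base block claims 4 extension blocks. */
	edid[0x7e] = 4;

	uint8_t last_block = edid[0x7e];

	/* Unpatched logic reads edid + 4 * 128, past the 256-byte buffer.
	 * Patched logic only checksums the claimed last block if it lies
	 * inside what was actually read. */
	if (last_block < num_blocks)
		printf("checksum: %u\n", checksum(edid + last_block * EDID_LENGTH));
	else
		printf("claimed last block %u not read (have %d blocks)\n",
		       last_block, num_blocks);
	return 0;
}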
@@ -1506,6 +1506,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 {
 	struct drm_client_dev *client = &fb_helper->client;
 	struct drm_device *dev = fb_helper->dev;
+	struct drm_mode_config *config = &dev->mode_config;
 	int ret = 0;
 	int crtc_count = 0;
 	struct drm_connector_list_iter conn_iter;
@@ -1663,6 +1664,11 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 	/* Handle our overallocation */
 	sizes.surface_height *= drm_fbdev_overalloc;
 	sizes.surface_height /= 100;
+	if (sizes.surface_height > config->max_height) {
+		drm_dbg_kms(dev, "Fbdev over-allocation too large; clamping height to %d\n",
+			    config->max_height);
+		sizes.surface_height = config->max_height;
+	}
 
 	/* push down into drivers */
 	ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
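The clamp arithmetic in isolation, with made-up numbers (drm_fbdev_overalloc is a percentage module parameter, 100 meaning no overallocation; this sketch is stand-alone C, not the kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int surface_height = 1080;
	unsigned int drm_fbdev_overalloc = 400;   /* percent */
	unsigned int max_height = 2160;           /* from mode_config */

	surface_height = surface_height * drm_fbdev_overalloc / 100;
	printf("overallocated: %u\n", surface_height);   /* 4320 */

	if (surface_height > max_height)
		surface_height = max_height;             /* the new clamp */
	printf("clamped: %u\n", surface_height);         /* 2160 */
	return 0;
}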
@@ -46,6 +46,7 @@ int hyperv_mode_config_init(struct hyperv_drm_device *hv);
 int hyperv_update_vram_location(struct hv_device *hdev, phys_addr_t vram_pp);
 int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
 			    u32 w, u32 h, u32 pitch);
+int hyperv_hide_hw_ptr(struct hv_device *hdev);
 int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect);
 int hyperv_connect_vsp(struct hv_device *hdev);
@@ -101,6 +101,7 @@ static void hyperv_pipe_enable(struct drm_simple_display_pipe *pipe,
 	struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
 	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
 
+	hyperv_hide_hw_ptr(hv->hdev);
 	hyperv_update_situation(hv->hdev, 1, hv->screen_depth,
 				crtc_state->mode.hdisplay,
 				crtc_state->mode.vdisplay,
@@ -299,6 +299,55 @@ int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
 	return 0;
 }
 
+/*
+ * Hyper-V supports a hardware cursor feature. It's not used by Linux VM,
+ * but the Hyper-V host still draws a point as an extra mouse pointer,
+ * which is unwanted, especially when Xorg is running.
+ *
+ * The hyperv_fb driver uses synthvid_send_ptr() to hide the unwanted
+ * pointer, by setting msg.ptr_pos.is_visible = 1 and setting the
+ * msg.ptr_shape.data. Note: setting msg.ptr_pos.is_visible to 0 doesn't
+ * work in tests.
+ *
+ * Copy synthvid_send_ptr() to hyperv_drm and rename it to
+ * hyperv_hide_hw_ptr(). Note: hyperv_hide_hw_ptr() is also called in the
+ * handler of the SYNTHVID_FEATURE_CHANGE event, otherwise the host still
+ * draws an extra unwanted mouse pointer after the VM Connection window is
+ * closed and reopened.
+ */
+int hyperv_hide_hw_ptr(struct hv_device *hdev)
+{
+	struct synthvid_msg msg;
+
+	memset(&msg, 0, sizeof(struct synthvid_msg));
+	msg.vid_hdr.type = SYNTHVID_POINTER_POSITION;
+	msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+		sizeof(struct synthvid_pointer_position);
+	msg.ptr_pos.is_visible = 1;
+	msg.ptr_pos.video_output = 0;
+	msg.ptr_pos.image_x = 0;
+	msg.ptr_pos.image_y = 0;
+	hyperv_sendpacket(hdev, &msg);
+
+	memset(&msg, 0, sizeof(struct synthvid_msg));
+	msg.vid_hdr.type = SYNTHVID_POINTER_SHAPE;
+	msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+		sizeof(struct synthvid_pointer_shape);
+	msg.ptr_shape.part_idx = SYNTHVID_CURSOR_COMPLETE;
+	msg.ptr_shape.is_argb = 1;
+	msg.ptr_shape.width = 1;
+	msg.ptr_shape.height = 1;
+	msg.ptr_shape.hot_x = 0;
+	msg.ptr_shape.hot_y = 0;
+	msg.ptr_shape.data[0] = 0;
+	msg.ptr_shape.data[1] = 1;
+	msg.ptr_shape.data[2] = 1;
+	msg.ptr_shape.data[3] = 1;
+	hyperv_sendpacket(hdev, &msg);
+
+	return 0;
+}
+
 int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect)
 {
 	struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
@@ -392,8 +441,11 @@ static void hyperv_receive_sub(struct hv_device *hdev)
 		return;
 	}
 
-	if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE)
+	if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE) {
 		hv->dirt_needed = msg->feature_chg.is_dirt_needed;
+		if (hv->dirt_needed)
+			hyperv_hide_hw_ptr(hv->hdev);
+	}
 }
 
 static void hyperv_receive(void *ctx)
@@ -186,13 +186,16 @@ void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915)
 {
 	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
 	acpi_handle dhandle;
+	union acpi_object *obj;
 
 	dhandle = ACPI_HANDLE(&pdev->dev);
 	if (!dhandle)
 		return;
 
-	acpi_evaluate_dsm(dhandle, &intel_dsm_guid2, INTEL_DSM_REVISION_ID,
-			  INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED, NULL);
+	obj = acpi_evaluate_dsm(dhandle, &intel_dsm_guid2, INTEL_DSM_REVISION_ID,
+				INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED, NULL);
+	if (obj)
+		ACPI_FREE(obj);
 }
 
 /*
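The leak pattern fixed above in generic form: the callee allocates the result and the caller owns it, so discarding the return value leaks. A stand-alone C sketch (illustrative stand-in for acpi_evaluate_dsm()/ACPI_FREE(), not the i915 code):

#include <stdlib.h>
#include <string.h>

static char *evaluate(void)          /* returns heap memory, may be NULL */
{
	return strdup("result");
}

static void leaky(void)
{
	evaluate();                  /* return value dropped: leak */
}

static void fixed(void)
{
	char *obj = evaluate();
	if (obj)
		free(obj);           /* caller releases what it owns */
}

int main(void)
{
	leaky();
	fixed();
	return 0;
}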
@@ -937,6 +937,10 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
 	unsigned int n;
 
 	e = alloc_engines(num_engines);
+	if (!e)
+		return ERR_PTR(-ENOMEM);
+	e->num_engines = num_engines;
+
 	for (n = 0; n < num_engines; n++) {
 		struct intel_context *ce;
 		int ret;
@@ -970,7 +974,6 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
 			goto free_engines;
 		}
 	}
-	e->num_engines = num_engines;
 
 	return e;
@@ -421,6 +421,7 @@ void intel_context_fini(struct intel_context *ce)
 
 	mutex_destroy(&ce->pin_mutex);
 	i915_active_fini(&ce->active);
+	i915_sw_fence_fini(&ce->guc_blocked);
 }
 
 void i915_context_module_exit(void)
@@ -4,8 +4,6 @@
  */
 
 #include <linux/clk.h>
-#include <linux/dma-mapping.h>
-#include <linux/mailbox_controller.h>
 #include <linux/pm_runtime.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
 #include <linux/soc/mediatek/mtk-mmsys.h>
@@ -52,11 +50,8 @@ struct mtk_drm_crtc {
 	bool				pending_async_planes;
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	struct mbox_client		cmdq_cl;
-	struct mbox_chan		*cmdq_chan;
-	struct cmdq_pkt			cmdq_handle;
+	struct cmdq_client		*cmdq_client;
 	u32				cmdq_event;
-	u32				cmdq_vblank_cnt;
 #endif
 
 	struct device			*mmsys_dev;
@@ -227,79 +222,9 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
 }
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-static int mtk_drm_cmdq_pkt_create(struct mbox_chan *chan, struct cmdq_pkt *pkt,
-				   size_t size)
+static void ddp_cmdq_cb(struct cmdq_cb_data data)
 {
-	struct device *dev;
-	dma_addr_t dma_addr;
-
-	pkt->va_base = kzalloc(size, GFP_KERNEL);
-	if (!pkt->va_base) {
-		kfree(pkt);
-		return -ENOMEM;
-	}
-	pkt->buf_size = size;
-
-	dev = chan->mbox->dev;
-	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
-				  DMA_TO_DEVICE);
-	if (dma_mapping_error(dev, dma_addr)) {
-		dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
-		kfree(pkt->va_base);
-		kfree(pkt);
-		return -ENOMEM;
-	}
-
-	pkt->pa_base = dma_addr;
-
-	return 0;
-}
-
-static void mtk_drm_cmdq_pkt_destroy(struct mbox_chan *chan, struct cmdq_pkt *pkt)
-{
-	dma_unmap_single(chan->mbox->dev, pkt->pa_base, pkt->buf_size,
-			 DMA_TO_DEVICE);
-	kfree(pkt->va_base);
-	kfree(pkt);
-}
-
-static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
-{
-	struct mtk_drm_crtc *mtk_crtc = container_of(cl, struct mtk_drm_crtc, cmdq_cl);
-	struct cmdq_cb_data *data = mssg;
-	struct mtk_crtc_state *state;
-	unsigned int i;
-
-	state = to_mtk_crtc_state(mtk_crtc->base.state);
-
-	state->pending_config = false;
-
-	if (mtk_crtc->pending_planes) {
-		for (i = 0; i < mtk_crtc->layer_nr; i++) {
-			struct drm_plane *plane = &mtk_crtc->planes[i];
-			struct mtk_plane_state *plane_state;
-
-			plane_state = to_mtk_plane_state(plane->state);
-
-			plane_state->pending.config = false;
-		}
-		mtk_crtc->pending_planes = false;
-	}
-
-	if (mtk_crtc->pending_async_planes) {
-		for (i = 0; i < mtk_crtc->layer_nr; i++) {
-			struct drm_plane *plane = &mtk_crtc->planes[i];
-			struct mtk_plane_state *plane_state;
-
-			plane_state = to_mtk_plane_state(plane->state);
-
-			plane_state->pending.async_config = false;
-		}
-		mtk_crtc->pending_async_planes = false;
-	}
-
-	mtk_crtc->cmdq_vblank_cnt = 0;
-	mtk_drm_cmdq_pkt_destroy(mtk_crtc->cmdq_chan, data->pkt);
+	cmdq_pkt_destroy(data.data);
 }
 #endif
 
@@ -453,8 +378,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
 				    state->pending_vrefresh, 0,
 				    cmdq_handle);
 
-		if (!cmdq_handle)
-			state->pending_config = false;
+		state->pending_config = false;
 	}
 
 	if (mtk_crtc->pending_planes) {
@@ -474,12 +398,9 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
 			mtk_ddp_comp_layer_config(comp, local_layer,
 						  plane_state,
 						  cmdq_handle);
-			if (!cmdq_handle)
-				plane_state->pending.config = false;
+			plane_state->pending.config = false;
 		}
-
-		if (!cmdq_handle)
-			mtk_crtc->pending_planes = false;
+		mtk_crtc->pending_planes = false;
 	}
 
 	if (mtk_crtc->pending_async_planes) {
@@ -499,12 +420,9 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
 			mtk_ddp_comp_layer_config(comp, local_layer,
 						  plane_state,
 						  cmdq_handle);
-			if (!cmdq_handle)
-				plane_state->pending.async_config = false;
+			plane_state->pending.async_config = false;
 		}
-
-		if (!cmdq_handle)
-			mtk_crtc->pending_async_planes = false;
+		mtk_crtc->pending_async_planes = false;
 	}
 }
 
@@ -512,7 +430,7 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
 				       bool needs_vblank)
 {
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
+	struct cmdq_pkt *cmdq_handle;
 #endif
 	struct drm_crtc *crtc = &mtk_crtc->base;
 	struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -550,24 +468,14 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
 		mtk_mutex_release(mtk_crtc->mutex);
 	}
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	if (mtk_crtc->cmdq_chan) {
-		mbox_flush(mtk_crtc->cmdq_chan, 2000);
-		cmdq_handle->cmd_buf_size = 0;
+	if (mtk_crtc->cmdq_client) {
+		mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
+		cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
 		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
 		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
 		mtk_crtc_ddp_config(crtc, cmdq_handle);
 		cmdq_pkt_finalize(cmdq_handle);
-		dma_sync_single_for_device(mtk_crtc->cmdq_chan->mbox->dev,
-					   cmdq_handle->pa_base,
-					   cmdq_handle->cmd_buf_size,
-					   DMA_TO_DEVICE);
-		/*
-		 * CMDQ command should execute in next vblank,
-		 * If it fail to execute in next 2 vblank, timeout happen.
-		 */
-		mtk_crtc->cmdq_vblank_cnt = 2;
-		mbox_send_message(mtk_crtc->cmdq_chan, cmdq_handle);
-		mbox_client_txdone(mtk_crtc->cmdq_chan, 0);
+		cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
 	}
 #endif
 	mtk_crtc->config_updating = false;
@@ -581,15 +489,12 @@ static void mtk_crtc_ddp_irq(void *data)
 	struct mtk_drm_private *priv = crtc->dev->dev_private;
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	if (!priv->data->shadow_register && !mtk_crtc->cmdq_chan)
-		mtk_crtc_ddp_config(crtc, NULL);
-	else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
-		DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
-			  drm_crtc_index(&mtk_crtc->base));
+	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
 #else
 	if (!priv->data->shadow_register)
-		mtk_crtc_ddp_config(crtc, NULL);
 #endif
+		mtk_crtc_ddp_config(crtc, NULL);
 
 	mtk_drm_finish_page_flip(mtk_crtc);
 }
@@ -924,20 +829,16 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 	mutex_init(&mtk_crtc->hw_lock);
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	mtk_crtc->cmdq_cl.dev = mtk_crtc->mmsys_dev;
-	mtk_crtc->cmdq_cl.tx_block = false;
-	mtk_crtc->cmdq_cl.knows_txdone = true;
-	mtk_crtc->cmdq_cl.rx_callback = ddp_cmdq_cb;
-	mtk_crtc->cmdq_chan =
-			mbox_request_channel(&mtk_crtc->cmdq_cl,
-					     drm_crtc_index(&mtk_crtc->base));
-	if (IS_ERR(mtk_crtc->cmdq_chan)) {
+	mtk_crtc->cmdq_client =
+			cmdq_mbox_create(mtk_crtc->mmsys_dev,
+					 drm_crtc_index(&mtk_crtc->base));
+	if (IS_ERR(mtk_crtc->cmdq_client)) {
 		dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
 			drm_crtc_index(&mtk_crtc->base));
-		mtk_crtc->cmdq_chan = NULL;
+		mtk_crtc->cmdq_client = NULL;
 	}
 
-	if (mtk_crtc->cmdq_chan) {
+	if (mtk_crtc->cmdq_client) {
 		ret = of_property_read_u32_index(priv->mutex_node,
 						 "mediatek,gce-events",
 						 drm_crtc_index(&mtk_crtc->base),
@@ -945,18 +846,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 		if (ret) {
 			dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
 				drm_crtc_index(&mtk_crtc->base));
-			mbox_free_channel(mtk_crtc->cmdq_chan);
-			mtk_crtc->cmdq_chan = NULL;
-		} else {
-			ret = mtk_drm_cmdq_pkt_create(mtk_crtc->cmdq_chan,
-						      &mtk_crtc->cmdq_handle,
-						      PAGE_SIZE);
-			if (ret) {
-				dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
-					drm_crtc_index(&mtk_crtc->base));
-				mbox_free_channel(mtk_crtc->cmdq_chan);
-				mtk_crtc->cmdq_chan = NULL;
-			}
+			cmdq_mbox_destroy(mtk_crtc->cmdq_client);
+			mtk_crtc->cmdq_client = NULL;
 		}
 	}
 #endif
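The restored flow above allocates one packet per flush, hands it to cmdq_pkt_flush_async(), and lets the completion callback free it via cmdq_pkt_destroy(). A generic sketch of that fire-and-forget ownership pattern (hypothetical names, synchronous stand-in for the mailbox, not the cmdq API itself):

#include <stdlib.h>

struct pkt { int cmds; };

/* Hypothetical stand-ins for cmdq_pkt_create()/cmdq_pkt_flush_async(). */
static struct pkt *pkt_create(void)
{
	return calloc(1, sizeof(struct pkt));
}

static void done_cb(void *data)
{
	free(data);                   /* the callback owns and frees the packet */
}

static void flush_async(struct pkt *p, void (*cb)(void *), void *data)
{
	/* Real code queues the packet to hardware and the callback runs on
	 * completion; here we "complete" immediately. */
	cb(data);
}

int main(void)
{
	struct pkt *p = pkt_create();
	if (!p)
		return 1;
	p->cmds = 42;
	flush_async(p, done_cb, p);   /* ownership passes with the call */
	return 0;
}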
@@ -571,13 +571,14 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 	}
 
 	icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
-	ret = IS_ERR(icc_path);
-	if (ret)
+	if (IS_ERR(icc_path)) {
+		ret = PTR_ERR(icc_path);
 		goto fail;
+	}
 
 	ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
-	ret = IS_ERR(ocmem_icc_path);
-	if (ret) {
+	if (IS_ERR(ocmem_icc_path)) {
+		ret = PTR_ERR(ocmem_icc_path);
 		/* allow -ENODATA, ocmem icc is optional */
 		if (ret != -ENODATA)
 			goto fail;
@@ -699,13 +699,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 	}
 
 	icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
-	ret = IS_ERR(icc_path);
-	if (ret)
+	if (IS_ERR(icc_path)) {
+		ret = PTR_ERR(icc_path);
 		goto fail;
+	}
 
 	ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
-	ret = IS_ERR(ocmem_icc_path);
-	if (ret) {
+	if (IS_ERR(ocmem_icc_path)) {
+		ret = PTR_ERR(ocmem_icc_path);
 		/* allow -ENODATA, ocmem icc is optional */
 		if (ret != -ENODATA)
 			goto fail;
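Both the a3xx and a4xx hunks fix the same mistake: IS_ERR() yields only 0 or 1, so assigning it to ret loses the encoded errno. A userspace sketch of the kernel's pointer-error convention, with simplified re-implementations of ERR_PTR()/IS_ERR()/PTR_ERR() (assuming the usual "top page of the address space is error space" encoding; not the kernel headers):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *get_path(void)
{
	return ERR_PTR(-ENODEV);     /* pretend the lookup failed */
}

int main(void)
{
	void *icc_path = get_path();
	int ret;

	ret = IS_ERR(icc_path);      /* old bug: ret == 1, not -ENODEV */
	printf("buggy ret = %d\n", ret);

	if (IS_ERR(icc_path)) {      /* fixed: propagate the real errno */
		ret = PTR_ERR(icc_path);
		printf("fixed ret = %d\n", ret);   /* -19 (ENODEV) */
	}
	return 0;
}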
@@ -296,6 +296,8 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 	u32 val;
 	int request, ack;
 
+	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
 	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
 		return -EINVAL;
 
@@ -337,6 +339,8 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 {
 	int bit;
 
+	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
 	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
 		return;
 
@@ -1482,6 +1486,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	if (!pdev)
 		return -ENODEV;
 
+	mutex_init(&gmu->lock);
+
 	gmu->dev = &pdev->dev;
 
 	of_dma_configure(gmu->dev, node, true);
@@ -44,6 +44,9 @@ struct a6xx_gmu_bo {
 struct a6xx_gmu {
 	struct device *dev;
 
+	/* For serializing communication with the GMU: */
+	struct mutex lock;
+
 	struct msm_gem_address_space *aspace;
 
 	void * __iomem mmio;
@@ -106,7 +106,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
 	u32 asid;
 	u64 memptr = rbmemptr(ring, ttbr0);
 
-	if (ctx == a6xx_gpu->cur_ctx)
+	if (ctx->seqno == a6xx_gpu->cur_ctx_seqno)
 		return;
 
 	if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
@@ -139,7 +139,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
 	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
 	OUT_RING(ring, 0x31);
 
-	a6xx_gpu->cur_ctx = ctx;
+	a6xx_gpu->cur_ctx_seqno = ctx->seqno;
 }
 
 static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
@@ -881,7 +881,7 @@ static int a6xx_zap_shader_init(struct msm_gpu *gpu)
 	  A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
 	  A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
 
-static int a6xx_hw_init(struct msm_gpu *gpu)
+static int hw_init(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -1081,7 +1081,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	/* Always come up on rb 0 */
 	a6xx_gpu->cur_ring = gpu->rb[0];
 
-	a6xx_gpu->cur_ctx = NULL;
+	a6xx_gpu->cur_ctx_seqno = 0;
 
 	/* Enable the SQE_to start the CP engine */
 	gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
@@ -1135,6 +1135,19 @@ out:
 	return ret;
 }
 
+static int a6xx_hw_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+	int ret;
+
+	mutex_lock(&a6xx_gpu->gmu.lock);
+	ret = hw_init(gpu);
+	mutex_unlock(&a6xx_gpu->gmu.lock);
+
+	return ret;
+}
+
 static void a6xx_dump(struct msm_gpu *gpu)
 {
 	DRM_DEV_INFO(&gpu->pdev->dev, "status:   %08x\n",
@@ -1509,7 +1522,9 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
 
 	trace_msm_gpu_resume(0);
 
+	mutex_lock(&a6xx_gpu->gmu.lock);
 	ret = a6xx_gmu_resume(a6xx_gpu);
+	mutex_unlock(&a6xx_gpu->gmu.lock);
 	if (ret)
 		return ret;
 
@@ -1532,7 +1547,9 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
 
 	msm_devfreq_suspend(gpu);
 
+	mutex_lock(&a6xx_gpu->gmu.lock);
 	ret = a6xx_gmu_stop(a6xx_gpu);
+	mutex_unlock(&a6xx_gpu->gmu.lock);
 	if (ret)
 		return ret;
 
@@ -1547,18 +1564,19 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
-	static DEFINE_MUTEX(perfcounter_oob);
 
-	mutex_lock(&perfcounter_oob);
+	mutex_lock(&a6xx_gpu->gmu.lock);
 
 	/* Force the GPU power on so we can read this register */
 	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
 
 	*value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
-		REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
+			    REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
 
 	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
-	mutex_unlock(&perfcounter_oob);
+
+	mutex_unlock(&a6xx_gpu->gmu.lock);
+
 	return 0;
 }
 
@@ -1622,6 +1640,16 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
 	return (unsigned long)busy_time;
 }
 
+void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+	mutex_lock(&a6xx_gpu->gmu.lock);
+	a6xx_gmu_set_freq(gpu, opp);
+	mutex_unlock(&a6xx_gpu->gmu.lock);
+}
+
 static struct msm_gem_address_space *
 a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
 {
@@ -1766,7 +1794,7 @@ static const struct adreno_gpu_funcs funcs = {
 #endif
 		.gpu_busy = a6xx_gpu_busy,
 		.gpu_get_freq = a6xx_gmu_get_freq,
-		.gpu_set_freq = a6xx_gmu_set_freq,
+		.gpu_set_freq = a6xx_gpu_set_freq,
 #if defined(CONFIG_DRM_MSM_GPU_STATE)
 		.gpu_state_get = a6xx_gpu_state_get,
 		.gpu_state_put = a6xx_gpu_state_put,
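The recurring shape in this file: an unlocked worker (hw_init()) plus a public wrapper that takes gmu.lock, so paths that already hold the lock can call the worker without deadlocking. A pthread sketch of that layering (names hypothetical, not the msm code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t gmu_lock = PTHREAD_MUTEX_INITIALIZER;

static int hw_init_locked(void)      /* caller must hold gmu_lock */
{
	/* ... program the hardware ... */
	return 0;
}

static int hw_init(void)             /* public entry point, takes the lock */
{
	int ret;

	pthread_mutex_lock(&gmu_lock);
	ret = hw_init_locked(); 
	pthread_mutex_unlock(&gmu_lock);
	return ret;
}

static int recover(void)             /* holds the lock across several steps */
{
	int ret;

	pthread_mutex_lock(&gmu_lock);
	/* ... other serialized GMU traffic ... */
	ret = hw_init_locked();      /* reuses the worker, no double-lock */
	pthread_mutex_unlock(&gmu_lock);
	return ret;
}

int main(void)
{
	printf("%d %d\n", hw_init(), recover());
	return 0;
}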
@@ -19,7 +19,16 @@ struct a6xx_gpu {
 	uint64_t sqe_iova;
 
 	struct msm_ringbuffer *cur_ring;
-	struct msm_file_private *cur_ctx;
+
+	/**
+	 * cur_ctx_seqno:
+	 *
+	 * The ctx->seqno value of the context with current pgtables
+	 * installed.  Tracked by seqno rather than pointer value to
+	 * avoid dangling pointers, and cases where a ctx can be freed
+	 * and a new one created with the same address.
+	 */
+	int cur_ctx_seqno;
 
 	struct a6xx_gmu gmu;
 
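Why a seqno beats a pointer for "is this the same context?": the allocator can hand a new context the address of a freed one, so a pointer compare can succeed spuriously and skip the pagetable switch. A stand-alone sketch using a monotonic counter as identity (hypothetical, userspace):

#include <stdio.h>
#include <stdlib.h>

struct ctx { int seqno; };

static int next_seqno;

static struct ctx *ctx_new(void)
{
	struct ctx *c = malloc(sizeof(*c));
	c->seqno = ++next_seqno;     /* identity survives address reuse */
	return c;
}

int main(void)
{
	struct ctx *a = ctx_new();
	int cur_ctx_seqno = a->seqno;

	free(a);
	struct ctx *b = ctx_new();   /* may land at the same address as a */

	/* A pointer compare could claim b "is" a; a seqno compare cannot. */
	printf("same context? %s\n",
	       b->seqno == cur_ctx_seqno ? "yes" : "no");   /* prints "no" */
	return 0;
}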
@@ -794,7 +794,7 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
 			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
 			-1),
 	PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2, sdm845_pp_sblk,
-			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+			DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
 			-1),
 };
@@ -1125,6 +1125,20 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
 	__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
 }
 
+static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
+	.set_config = drm_atomic_helper_set_config,
+	.destroy = mdp5_crtc_destroy,
+	.page_flip = drm_atomic_helper_page_flip,
+	.reset = mdp5_crtc_reset,
+	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
+	.atomic_destroy_state = mdp5_crtc_destroy_state,
+	.atomic_print_state = mdp5_crtc_atomic_print_state,
+	.get_vblank_counter = mdp5_crtc_get_vblank_counter,
+	.enable_vblank = msm_crtc_enable_vblank,
+	.disable_vblank = msm_crtc_disable_vblank,
+	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+};
+
 static const struct drm_crtc_funcs mdp5_crtc_funcs = {
 	.set_config = drm_atomic_helper_set_config,
 	.destroy = mdp5_crtc_destroy,
@@ -1313,6 +1327,8 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 	mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;
 
 	drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
+				  cursor_plane ?
+				  &mdp5_crtc_no_lm_cursor_funcs :
 				  &mdp5_crtc_funcs, NULL);
 
 	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
@@ -1309,14 +1309,14 @@ static int dp_pm_resume(struct device *dev)
 	 * can not declared display is connected unless
 	 * HDMI cable is plugged in and sink_count of
 	 * dongle become 1
+	 * also only signal audio when disconnected
 	 */
-	if (dp->link->sink_count)
+	if (dp->link->sink_count) {
 		dp->dp_display.is_connected = true;
-	else
+	} else {
 		dp->dp_display.is_connected = false;
-
-	dp_display_handle_plugged_change(g_dp_display,
-				dp->dp_display.is_connected);
+		dp_display_handle_plugged_change(g_dp_display, false);
+	}
 
 	DRM_DEBUG_DP("After, sink_count=%d is_connected=%d core_inited=%d power_on=%d\n",
 		dp->link->sink_count, dp->dp_display.is_connected,
@@ -215,8 +215,10 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
 		goto fail;
 	}
 
-	if (!msm_dsi_manager_validate_current_config(msm_dsi->id))
+	if (!msm_dsi_manager_validate_current_config(msm_dsi->id)) {
+		ret = -EINVAL;
 		goto fail;
+	}
 
 	msm_dsi->encoder = encoder;
@@ -451,7 +451,7 @@ static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
 
 	return 0;
 err:
-	for (; i > 0; i--)
+	while (--i >= 0)
 		clk_disable_unprepare(msm_host->bus_clks[i]);
 
 	return ret;
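The old unwind had two off-by-one errors when clock i failed to enable: it "disabled" bus_clks[i], which was never enabled, and it skipped bus_clks[0]. A toy sketch of both loops (stand-alone C, pretending the clock at index 2 fails):

#include <stdio.h>

#define N 4
static int enabled[N];

static int enable(int i)
{
	if (i == 2)
		return -1;           /* pretend clk 2 fails */
	enabled[i] = 1;
	return 0;
}

static void disable(int i)
{
	enabled[i] = 0;
	printf("disable clk %d\n", i);
}

int main(void)
{
	int i;

	for (i = 0; i < N; i++)
		if (enable(i))
			break;       /* i == 2 here */

	/* old: for (; i > 0; i--) disable(i);
	 *      touches clk 2 (never enabled) and leaves clk 0 on */
	while (--i >= 0)             /* fixed: disables 1, then 0 */
		disable(i);
	return 0;
}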
@@ -110,14 +110,13 @@ static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
 static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
 				    u32 nb_tries, u32 timeout_us)
 {
-	bool pll_locked = false;
+	bool pll_locked = false, pll_ready = false;
 	void __iomem *base = pll_14nm->phy->pll_base;
 	u32 tries, val;
 
 	tries = nb_tries;
 	while (tries--) {
-		val = dsi_phy_read(base +
-			       REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+		val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
 		pll_locked = !!(val & BIT(5));
 
 		if (pll_locked)
@@ -126,23 +125,24 @@ static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
 		udelay(timeout_us);
 	}
 
-	if (!pll_locked) {
-		tries = nb_tries;
-		while (tries--) {
-			val = dsi_phy_read(base +
-				REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
-			pll_locked = !!(val & BIT(0));
+	if (!pll_locked)
+		goto out;
 
-			if (pll_locked)
-				break;
+	tries = nb_tries;
+	while (tries--) {
+		val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+		pll_ready = !!(val & BIT(0));
 
-			udelay(timeout_us);
-		}
-	}
+		if (pll_ready)
+			break;
 
-	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+		udelay(timeout_us);
+	}
 
-	return pll_locked;
+out:
+	DBG("DSI PLL is %slocked, %sready", pll_locked ? "" : "*not* ", pll_ready ? "" : "*not* ");
+
+	return pll_locked && pll_ready;
 }
 
 static void dsi_pll_14nm_config_init(struct dsi_pll_config *pconf)
@@ -428,7 +428,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
 	bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
 
 	snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
-	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id);
+	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id + 1);
 
 	bytediv_init.name = clk_name;
 	bytediv_init.ops = &clk_bytediv_ops;
@@ -442,7 +442,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
 		return ret;
 	provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw;
 
-	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id);
+	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id + 1);
 	/* DIV3 */
 	hw = devm_clk_hw_register_divider(dev, clk_name,
 					  parent_name, 0, pll_28nm->phy->pll_base +
@@ -1116,7 +1116,7 @@ void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on)
 int msm_edp_ctrl_init(struct msm_edp *edp)
 {
 	struct edp_ctrl *ctrl = NULL;
-	struct device *dev = &edp->pdev->dev;
+	struct device *dev;
 	int ret;
 
 	if (!edp) {
@@ -1124,6 +1124,7 @@ int msm_edp_ctrl_init(struct msm_edp *edp)
 		return -EINVAL;
 	}
 
+	dev = &edp->pdev->dev;
 	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
 	if (!ctrl)
 		return -ENOMEM;
@@ -630,10 +630,11 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 	if (ret)
 		goto err_msm_uninit;
 
-	ret = msm_disp_snapshot_init(ddev);
-	if (ret)
-		DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
-
+	if (kms) {
+		ret = msm_disp_snapshot_init(ddev);
+		if (ret)
+			DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
+	}
 	drm_mode_config_reset(ddev);
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -682,6 +683,7 @@ static void load_gpu(struct drm_device *dev)
 
 static int context_init(struct drm_device *dev, struct drm_file *file)
 {
+	static atomic_t ident = ATOMIC_INIT(0);
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_file_private *ctx;
 
@@ -689,12 +691,17 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
 	if (!ctx)
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(&ctx->submitqueues);
+	rwlock_init(&ctx->queuelock);
+
 	kref_init(&ctx->ref);
 	msm_submitqueue_init(dev, ctx);
 
 	ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
 	file->driver_priv = ctx;
 
+	ctx->seqno = atomic_inc_return(&ident);
+
 	return 0;
 }
@@ -53,14 +53,6 @@ struct msm_disp_state;
 
 #define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
 
-struct msm_file_private {
-	rwlock_t queuelock;
-	struct list_head submitqueues;
-	int queueid;
-	struct msm_gem_address_space *aspace;
-	struct kref ref;
-};
-
 enum msm_mdp_plane_property {
 	PLANE_PROP_ZPOS,
 	PLANE_PROP_ALPHA,
@@ -488,41 +480,6 @@ void msm_writel(u32 data, void __iomem *addr);
 u32 msm_readl(const void __iomem *addr);
 void msm_rmw(void __iomem *addr, u32 mask, u32 or);
 
-struct msm_gpu_submitqueue;
-int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
-struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
-		u32 id);
-int msm_submitqueue_create(struct drm_device *drm,
-		struct msm_file_private *ctx,
-		u32 prio, u32 flags, u32 *id);
-int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
-		struct drm_msm_submitqueue_query *args);
-int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
-void msm_submitqueue_close(struct msm_file_private *ctx);
-
-void msm_submitqueue_destroy(struct kref *kref);
-
-static inline void __msm_file_private_destroy(struct kref *kref)
-{
-	struct msm_file_private *ctx = container_of(kref,
-		struct msm_file_private, ref);
-
-	msm_gem_address_space_put(ctx->aspace);
-	kfree(ctx);
-}
-
-static inline void msm_file_private_put(struct msm_file_private *ctx)
-{
-	kref_put(&ctx->ref, __msm_file_private_destroy);
-}
-
-static inline struct msm_file_private *msm_file_private_get(
-	struct msm_file_private *ctx)
-{
-	kref_get(&ctx->ref);
-	return ctx;
-}
-
 #define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
 #define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
 
@@ -547,7 +504,7 @@ static inline int align_pitch(int width, int bpp)
 static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
 {
 	ktime_t now = ktime_get();
-	unsigned long remaining_jiffies;
+	s64 remaining_jiffies;
 
 	if (ktime_compare(*timeout, now) < 0) {
 		remaining_jiffies = 0;
@@ -556,7 +513,7 @@ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
 		remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
 	}
 
-	return remaining_jiffies;
+	return clamp(remaining_jiffies, 0LL, (s64)INT_MAX);
}
 
 #endif /* __MSM_DRV_H__ */
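The overflow the timeout_to_jiffies() change closes: the remaining time is computed in 64 bits, but the function returns unsigned long, which is 32 bits on many arm32 msm targets, so a huge user-supplied timeout could truncate to a tiny wait. A stand-alone sketch of the truncation and the clamp (assuming a 32-bit unsigned long; not the kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t remaining_jiffies = 5000000000LL;    /* huge tick count */

	/* Truncating to a 32-bit unsigned long keeps only the low bits: */
	printf("truncated: %u\n", (uint32_t)remaining_jiffies);  /* 705032704 */

	/* The worst case is far smaller, e.g. 0x100000001 -> 1: */
	printf("worse: %u\n", (uint32_t)0x100000001LL);

	/* The fix caps the value before the implicit conversion: */
	if (remaining_jiffies < 0)
		remaining_jiffies = 0;
	if (remaining_jiffies > INT32_MAX)
		remaining_jiffies = INT32_MAX;
	printf("clamped: %u\n", (uint32_t)remaining_jiffies);
	return 0;
}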
@@ -46,7 +46,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 	if (!submit)
 		return ERR_PTR(-ENOMEM);
 
-	ret = drm_sched_job_init(&submit->base, &queue->entity, queue);
+	ret = drm_sched_job_init(&submit->base, queue->entity, queue);
 	if (ret) {
 		kfree(submit);
 		return ERR_PTR(ret);
@@ -171,7 +171,8 @@ out:
 static int submit_lookup_cmds(struct msm_gem_submit *submit,
 		struct drm_msm_gem_submit *args, struct drm_file *file)
 {
-	unsigned i, sz;
+	unsigned i;
+	size_t sz;
 	int ret = 0;
 
 	for (i = 0; i < args->nr_cmds; i++) {
@@ -907,7 +908,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	/* The scheduler owns a ref now: */
 	msm_gem_submit_get(submit);
 
-	drm_sched_entity_push_job(&submit->base, &queue->entity);
+	drm_sched_entity_push_job(&submit->base, queue->entity);
 
 	args->fence = submit->fence_id;
 
@@ -257,6 +257,39 @@ struct msm_gpu_perfcntr {
  */
 #define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
 
+/**
+ * struct msm_file_private - per-drm_file context
+ *
+ * @queuelock:    synchronizes access to submitqueues list
+ * @submitqueues: list of &msm_gpu_submitqueue created by userspace
+ * @queueid:      counter incremented each time a submitqueue is created,
+ *                used to assign &msm_gpu_submitqueue.id
+ * @aspace:       the per-process GPU address-space
+ * @ref:          reference count
+ * @seqno:        unique per process seqno
+ */
+struct msm_file_private {
+	rwlock_t queuelock;
+	struct list_head submitqueues;
+	int queueid;
+	struct msm_gem_address_space *aspace;
+	struct kref ref;
+	int seqno;
+
+	/**
+	 * entities:
+	 *
+	 * Table of per-priority-level sched entities used by submitqueues
+	 * associated with this &drm_file.  Because some userspace apps
+	 * make assumptions about rendering from multiple gl contexts
+	 * (of the same priority) within the process happening in FIFO
+	 * order without requiring any fencing beyond MakeCurrent(), we
+	 * create at most one &drm_sched_entity per-process per-priority-
+	 * level.
+	 */
+	struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
+};
+
 /**
  * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
  *
@@ -304,6 +337,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
 }
 
 /**
+ * struct msm_gpu_submitqueues - Userspace created context.
+ *
  * A submitqueue is associated with a gl context or vk queue (or equiv)
  * in userspace.
 *
@@ -321,7 +356,7 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
  *           seqno, protected by submitqueue lock
  * @lock:    submitqueue lock
  * @ref:     reference count
- * @entity: the submit job-queue
+ * @entity:  the submit job-queue
  */
 struct msm_gpu_submitqueue {
 	int id;
@@ -333,7 +368,7 @@ struct msm_gpu_submitqueue {
 	struct idr fence_idr;
 	struct mutex lock;
 	struct kref ref;
-	struct drm_sched_entity entity;
+	struct drm_sched_entity *entity;
 };
 
 struct msm_gpu_state_bo {
@@ -421,6 +456,33 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
 int msm_gpu_pm_suspend(struct msm_gpu *gpu);
 int msm_gpu_pm_resume(struct msm_gpu *gpu);
 
+int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+		u32 id);
+int msm_submitqueue_create(struct drm_device *drm,
+		struct msm_file_private *ctx,
+		u32 prio, u32 flags, u32 *id);
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+		struct drm_msm_submitqueue_query *args);
+int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
+void msm_submitqueue_close(struct msm_file_private *ctx);
+
+void msm_submitqueue_destroy(struct kref *kref);
+
+void __msm_file_private_destroy(struct kref *kref);
+
+static inline void msm_file_private_put(struct msm_file_private *ctx)
+{
+	kref_put(&ctx->ref, __msm_file_private_destroy);
+}
+
+static inline struct msm_file_private *msm_file_private_get(
+	struct msm_file_private *ctx)
+{
+	kref_get(&ctx->ref);
+	return ctx;
+}
+
 void msm_devfreq_init(struct msm_gpu *gpu);
 void msm_devfreq_cleanup(struct msm_gpu *gpu);
 void msm_devfreq_resume(struct msm_gpu *gpu);
@@ -151,6 +151,9 @@ void msm_devfreq_active(struct msm_gpu *gpu)
 	unsigned int idle_time;
 	unsigned long target_freq = df->idle_freq;
 
+	if (!df->devfreq)
+		return;
+
 	/*
 	 * Hold devfreq lock to synchronize with get_dev_status()/
 	 * target() callbacks
@@ -186,6 +189,9 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
 	struct msm_gpu_devfreq *df = &gpu->devfreq;
 	unsigned long idle_freq, target_freq = 0;
 
+	if (!df->devfreq)
+		return;
+
 	/*
 	 * Hold devfreq lock to synchronize with get_dev_status()/
 	 * target() callbacks
@@ -7,6 +7,24 @@
 
 #include "msm_gpu.h"
 
+void __msm_file_private_destroy(struct kref *kref)
+{
+	struct msm_file_private *ctx = container_of(kref,
+		struct msm_file_private, ref);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
+		if (!ctx->entities[i])
+			continue;
+
+		drm_sched_entity_destroy(ctx->entities[i]);
+		kfree(ctx->entities[i]);
+	}
+
+	msm_gem_address_space_put(ctx->aspace);
+	kfree(ctx);
+}
+
 void msm_submitqueue_destroy(struct kref *kref)
 {
 	struct msm_gpu_submitqueue *queue = container_of(kref,
@@ -14,8 +32,6 @@ void msm_submitqueue_destroy(struct kref *kref)
 
 	idr_destroy(&queue->fence_idr);
 
-	drm_sched_entity_destroy(&queue->entity);
-
 	msm_file_private_put(queue->ctx);
 
 	kfree(queue);
@@ -61,13 +77,47 @@ void msm_submitqueue_close(struct msm_file_private *ctx)
 	}
 }
 
+static struct drm_sched_entity *
+get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
+		 unsigned ring_nr, enum drm_sched_priority sched_prio)
+{
+	static DEFINE_MUTEX(entity_lock);
+	unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;
+
+	/* We should have already validated that the requested priority is
+	 * valid by the time we get here.
+	 */
+	if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&entity_lock);
+
+	if (!ctx->entities[idx]) {
+		struct drm_sched_entity *entity;
+		struct drm_gpu_scheduler *sched = &ring->sched;
+		int ret;
+
+		entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
+
+		ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
+		if (ret) {
+			kfree(entity);
+			return ERR_PTR(ret);
+		}
+
+		ctx->entities[idx] = entity;
+	}
+
+	mutex_unlock(&entity_lock);
+
+	return ctx->entities[idx];
+}
+
 int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
 		u32 prio, u32 flags, u32 *id)
 {
 	struct msm_drm_private *priv = drm->dev_private;
 	struct msm_gpu_submitqueue *queue;
-	struct msm_ringbuffer *ring;
-	struct drm_gpu_scheduler *sched;
 	enum drm_sched_priority sched_prio;
 	unsigned ring_nr;
 	int ret;
@@ -91,12 +141,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
 	queue->flags = flags;
 	queue->ring_nr = ring_nr;
 
-	ring = priv->gpu->rb[ring_nr];
-	sched = &ring->sched;
-
-	ret = drm_sched_entity_init(&queue->entity,
-			sched_prio, &sched, 1, NULL);
-	if (ret) {
+	queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
+					 ring_nr, sched_prio);
+	if (IS_ERR(queue->entity)) {
+		ret = PTR_ERR(queue->entity);
 		kfree(queue);
 		return ret;
 	}
@@ -140,10 +188,6 @@ int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
 	 */
 	default_prio = DIV_ROUND_UP(max_priority, 2);
 
-	INIT_LIST_HEAD(&ctx->submitqueues);
-
-	rwlock_init(&ctx->queuelock);
-
 	return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
 }
 
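get_sched_entity() above is a lock-protected lazy table: slot idx = ring_nr * NR_SCHED_PRIORITIES + sched_prio is created on first use and shared by every later submitqueue with the same (ring, priority), which is what restores FIFO order across gl contexts in one process. A compact pthread sketch of the same lazy-singleton-per-slot shape (hypothetical types, not the msm code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_PRIOS 3
#define NR_RINGS 4

struct entity { unsigned ring_nr, prio; };

static struct entity *entities[NR_RINGS * NR_PRIOS];
static pthread_mutex_t entity_lock = PTHREAD_MUTEX_INITIALIZER;

static struct entity *get_entity(unsigned ring_nr, unsigned prio)
{
	unsigned idx = ring_nr * NR_PRIOS + prio;   /* 2D -> 1D slot */
	struct entity *e;

	pthread_mutex_lock(&entity_lock);
	if (!entities[idx]) {                       /* first user creates it */
		e = calloc(1, sizeof(*e));
		if (e) {
			e->ring_nr = ring_nr;
			e->prio = prio;
			entities[idx] = e;
		}
	}
	e = entities[idx];                          /* later users share it */
	pthread_mutex_unlock(&entity_lock);
	return e;
}

int main(void)
{
	/* Two "submitqueues" at the same ring/priority share one entity. */
	printf("%s\n", get_entity(0, 1) == get_entity(0, 1) ? "shared" : "distinct");
	return 0;
}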
@@ -82,7 +82,7 @@ g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
 	if (offset < 0)
 		return 0;
 
-	engn = fifo->base.func->engine_id(&fifo->base, engine);
+	engn = fifo->base.func->engine_id(&fifo->base, engine) - 1;
 	save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
 	nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
 	done = nvkm_msec(device, 2000,
@@ -295,6 +295,7 @@ config DRM_PANEL_OLIMEX_LCD_OLINUXINO
 	depends on OF
 	depends on I2C
 	depends on BACKLIGHT_CLASS_DEVICE
+	select CRC32
 	help
 	  The panel is used with different sizes LCDs, from 480x272 to
 	  1280x800, and 24 bit per pixel.
@@ -214,7 +214,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 	}
 	ret = 0;
 
-#if defined(__i386__) || defined(__x86_64__)
+#ifdef CONFIG_X86
 	wbinvd();
 #else
 	mb();
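Why the guard change matters for UML: an ARCH=um kernel is built with the host compiler, so __i386__/__x86_64__ are defined even though the kernel is not a native x86 kernel and has no wbinvd(). Keying on CONFIG_X86 (unset for um) routes UML to the mb() fallback. A stand-alone sketch of the selection logic (PRETEND_CONFIG_X86 is a made-up macro standing in for the Kconfig symbol):

#include <stdio.h>

int main(void)
{
#if defined(__i386__) || defined(__x86_64__)
	printf("compiler targets x86\n");       /* true on UML too */
#endif
#ifdef PRETEND_CONFIG_X86
	printf("kernel config says x86\n");     /* only a real x86 kernel */
#else
	printf("fallback path (e.g. UML): barrier instead of wbinvd\n");
#endif
	return 0;
}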
@@ -86,12 +86,20 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
 	}
 
 	/*
-	 * Create and initialize the encoder. On Gen3 skip the LVDS1 output if
+	 * Create and initialize the encoder. On Gen3, skip the LVDS1 output if
 	 * the LVDS1 encoder is used as a companion for LVDS0 in dual-link
-	 * mode.
+	 * mode, or any LVDS output if it isn't connected. The latter may happen
+	 * on D3 or E3 as the LVDS encoders are needed to provide the pixel
+	 * clock to the DU, even when the LVDS outputs are not used.
 	 */
-	if (rcdu->info->gen >= 3 && output == RCAR_DU_OUTPUT_LVDS1) {
-		if (rcar_lvds_dual_link(bridge))
+	if (rcdu->info->gen >= 3) {
+		if (output == RCAR_DU_OUTPUT_LVDS1 &&
+		    rcar_lvds_dual_link(bridge))
 			return -ENOLINK;
+
+		if ((output == RCAR_DU_OUTPUT_LVDS0 ||
+		     output == RCAR_DU_OUTPUT_LVDS1) &&
+		    !rcar_lvds_is_connected(bridge))
+			return -ENOLINK;
 	}
@@ -576,6 +576,9 @@ static int rcar_lvds_attach(struct drm_bridge *bridge,
 {
 	struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
 
+	if (!lvds->next_bridge)
+		return 0;
+
 	return drm_bridge_attach(bridge->encoder, lvds->next_bridge, bridge,
 				 flags);
 }
@@ -598,6 +601,14 @@ bool rcar_lvds_dual_link(struct drm_bridge *bridge)
 }
 EXPORT_SYMBOL_GPL(rcar_lvds_dual_link);
 
+bool rcar_lvds_is_connected(struct drm_bridge *bridge)
+{
+	struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+
+	return lvds->next_bridge != NULL;
+}
+EXPORT_SYMBOL_GPL(rcar_lvds_is_connected);
+
 /* -----------------------------------------------------------------------------
  * Probe & Remove
  */
@@ -16,6 +16,7 @@ struct drm_bridge;
 int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq);
 void rcar_lvds_clk_disable(struct drm_bridge *bridge);
 bool rcar_lvds_dual_link(struct drm_bridge *bridge);
+bool rcar_lvds_is_connected(struct drm_bridge *bridge);
 #else
 static inline int rcar_lvds_clk_enable(struct drm_bridge *bridge,
 				       unsigned long freq)
@@ -27,6 +28,10 @@ static inline bool rcar_lvds_dual_link(struct drm_bridge *bridge)
 {
 	return false;
 }
+static inline bool rcar_lvds_is_connected(struct drm_bridge *bridge)
+{
+	return false;
+}
 #endif /* CONFIG_DRM_RCAR_LVDS */
 
 #endif /* __RCAR_LVDS_H__ */