Merge tag 'drm-misc-next-2017-10-20' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
Final drm-misc feature pull for 4.15:

UAPI Changes:
- new madvise ioctl for vc4 (Boris)

Core Changes:
- plane commit tracking fixes (Maarten)
- vgaarb improvements for fancy new platforms (aka ppc64 and arm64) by
  Bjorn Helgaas

Driver Changes:
- pile of new panel drivers: Toshiba LT089AC19000, Innolux AT043TN24
- more sun4i work to support A10/A20 Tcon and hdmi outputs
- vc4: fix sleep in irq handler by making it threaded (Eric)
- udl probe/edid read fixes (Robert Tarasov)

And a bunch of misc small cleanups/refactors and doc fixes all over.

* tag 'drm-misc-next-2017-10-20' of git://anongit.freedesktop.org/drm/drm-misc: (32 commits)
  drm/vc4: Fix sleeps during the IRQ handler for DSI transactions.
  drm/vc4: Add the DRM_IOCTL_VC4_GEM_MADVISE ioctl
  drm/panel: simple: add Toshiba LT089AC19000
  dma-fence: remove duplicate word in comment
  drm/panel: simple: add delays for Innolux AT043TN24
  drm/panel: simple: add bus flags for Innolux AT043TN24
  drm/panel: simple: fix vertical timings for Innolux AT043TN24
  drm/atomic-helper: check that drivers call drm_crtc_vblank_off
  drm: some KMS todo ideas
  vgaarb: Factor out EFI and fallback default device selection
  vgaarb: Select a default VGA device even if there's no legacy VGA
  drm/bridge: adv7511: Fix a use after free
  drm/sun4i: Add support for A20 display pipeline components
  drm/sun4i: Add support for A10 display pipeline components
  drm/sun4i: hdmi: Support HDMI controller on A10
  drm/sun4i: tcon: Add support for A10 TCON
  drm/sun4i: backend: Support output muxing
  drm/sun4i: tcon: Move out the tcon0 common setup
  drm/sun4i: tcon: Don't rely on encoders to set the TCON mode
  drm/sun4i: tcon: Don't rely on encoders to enable the TCON
  ...
@@ -0,0 +1,8 @@
+Toshiba 8.9" WXGA (1280x768) TFT LCD panel
+
+Required properties:
+- compatible: should be "toshiba,lt089ac29000"
+- power-supply: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
@@ -40,6 +40,7 @@ CEC. It is one end of the pipeline.
 
 Required properties:
   - compatible: value must be one of:
+    * allwinner,sun4i-a10-hdmi
     * allwinner,sun5i-a10s-hdmi
     * allwinner,sun6i-a31-hdmi
   - reg: base address and size of memory-mapped region
@@ -86,9 +87,11 @@ The TCON acts as a timing controller for RGB, LVDS and TV interfaces.
 
 Required properties:
   - compatible: value must be either:
+    * allwinner,sun4i-a10-tcon
     * allwinner,sun5i-a13-tcon
     * allwinner,sun6i-a31-tcon
     * allwinner,sun6i-a31s-tcon
+    * allwinner,sun7i-a20-tcon
     * allwinner,sun8i-a33-tcon
     * allwinner,sun8i-v3s-tcon
   - reg: base address and size of memory-mapped region
@@ -153,8 +156,10 @@ system.
 
 Required properties:
   - compatible: value must be one of:
+    * allwinner,sun4i-a10-display-backend
     * allwinner,sun5i-a13-display-backend
     * allwinner,sun6i-a31-display-backend
+    * allwinner,sun7i-a20-display-backend
     * allwinner,sun8i-a33-display-backend
   - reg: base address and size of the memory-mapped region.
   - interrupts: interrupt associated to this IP
@@ -185,8 +190,10 @@ deinterlacing and color space conversion.
 
 Required properties:
   - compatible: value must be one of:
+    * allwinner,sun4i-a10-display-frontend
     * allwinner,sun5i-a13-display-frontend
     * allwinner,sun6i-a31-display-frontend
+    * allwinner,sun7i-a20-display-frontend
     * allwinner,sun8i-a33-display-frontend
   - reg: base address and size of the memory-mapped region.
   - interrupts: interrupt associated to this IP
@@ -231,10 +238,12 @@ extra node.
 
 Required properties:
   - compatible: value must be one of:
+    * allwinner,sun4i-a10-display-engine
     * allwinner,sun5i-a10s-display-engine
     * allwinner,sun5i-a13-display-engine
     * allwinner,sun6i-a31-display-engine
     * allwinner,sun6i-a31s-display-engine
+    * allwinner,sun7i-a20-display-engine
     * allwinner,sun8i-a33-display-engine
     * allwinner,sun8i-v3s-display-engine
 
@@ -304,6 +304,18 @@ There's a bunch of issues with it:
 
 Contact: Daniel Vetter
 
+KMS cleanups
+------------
+
+Some of these date from the very introduction of KMS in 2008 ...
+
+- drm_mode_config.crtc_idr is misnamed, since it contains all KMS object. Should
+  be renamed to drm_mode_config.object_idr.
+
+- drm_display_mode doesn't need to be derived from drm_mode_object. That's
+  leftovers from older (never merged into upstream) KMS designs where modes
+  where set using their ID, including support to add/remove modes.
+
 Better Testing
 ==============
 
@@ -1740,15 +1740,3 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
-
-static void fixup_vga(struct pci_dev *pdev)
-{
-	u16 cmd;
-
-	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
-	if ((cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) || !vga_default_device())
-		vga_set_default_device(pdev);
-
-}
-DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
-			      PCI_CLASS_DISPLAY_VGA, 8, fixup_vga);
@@ -298,7 +298,7 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
 
 	if (force) {
 		/* Display is disabled, so just drop the old fb */
-		drm_framebuffer_unreference(fb);
+		drm_framebuffer_put(fb);
 		return;
 	}
 
@@ -321,7 +321,7 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
 	 * the best. The worst that will happen is the buffer gets
 	 * reused before it has finished being displayed.
 	 */
-	drm_framebuffer_unreference(fb);
+	drm_framebuffer_put(fb);
 }
 
 static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
@@ -577,7 +577,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
 	unsigned i;
 	bool interlaced;
 
-	drm_framebuffer_reference(crtc->primary->fb);
+	drm_framebuffer_get(crtc->primary->fb);
 
 	interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
 
@@ -718,7 +718,7 @@ static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 				    MAX_SCHEDULE_TIMEOUT);
 
 	/* Take a reference to the new fb as we're using it */
-	drm_framebuffer_reference(crtc->primary->fb);
+	drm_framebuffer_get(crtc->primary->fb);
 
 	/* Update the base in the CRTC */
 	armada_drm_crtc_update_regs(dcrtc, regs);
@@ -742,7 +742,7 @@ void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
 	 * primary plane.
 	 */
 	if (plane->fb)
-		drm_framebuffer_unreference(plane->fb);
+		drm_framebuffer_put(plane->fb);
 
 	/* Power down the Y/U/V FIFOs */
 	sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
@@ -947,13 +947,13 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
 
 		/* Must be a kernel-mapped object */
 		if (!obj->addr) {
-			drm_gem_object_unreference_unlocked(&obj->obj);
+			drm_gem_object_put_unlocked(&obj->obj);
 			return -EINVAL;
 		}
 
 		if (obj->obj.size < w * h * 4) {
 			DRM_ERROR("buffer is too small\n");
-			drm_gem_object_unreference_unlocked(&obj->obj);
+			drm_gem_object_put_unlocked(&obj->obj);
 			return -ENOMEM;
 		}
 	}
@@ -961,7 +961,7 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
 	if (dcrtc->cursor_obj) {
 		dcrtc->cursor_obj->update = NULL;
 		dcrtc->cursor_obj->update_data = NULL;
-		drm_gem_object_unreference_unlocked(&dcrtc->cursor_obj->obj);
+		drm_gem_object_put_unlocked(&dcrtc->cursor_obj->obj);
 	}
 	dcrtc->cursor_obj = obj;
 	dcrtc->cursor_w = w;
@@ -997,7 +997,7 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
 	struct armada_private *priv = crtc->dev->dev_private;
 
 	if (dcrtc->cursor_obj)
-		drm_gem_object_unreference_unlocked(&dcrtc->cursor_obj->obj);
+		drm_gem_object_put_unlocked(&dcrtc->cursor_obj->obj);
 
 	priv->dcrtc[dcrtc->num] = NULL;
 	drm_crtc_cleanup(&dcrtc->crtc);
@@ -1045,12 +1045,12 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
 	 * Ensure that we hold a reference on the new framebuffer.
 	 * This has to match the behaviour in mode_set.
 	 */
-	drm_framebuffer_reference(fb);
+	drm_framebuffer_get(fb);
 
 	ret = armada_drm_crtc_queue_frame_work(dcrtc, work);
 	if (ret) {
 		/* Undo our reference above */
-		drm_framebuffer_unreference(fb);
+		drm_framebuffer_put(fb);
 		kfree(work);
 		return ret;
 	}
@@ -25,7 +25,7 @@ static void armada_drm_unref_work(struct work_struct *work)
 	struct drm_framebuffer *fb;
 
 	while (kfifo_get(&priv->fb_unref, &fb))
-		drm_framebuffer_unreference(fb);
+		drm_framebuffer_put(fb);
 }
 
 /* Must be called with dev->event_lock held */
@@ -17,7 +17,7 @@ static void armada_fb_destroy(struct drm_framebuffer *fb)
 	struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
 
 	drm_framebuffer_cleanup(&dfb->fb);
-	drm_gem_object_unreference_unlocked(&dfb->obj->obj);
+	drm_gem_object_put_unlocked(&dfb->obj->obj);
 	kfree(dfb);
 }
 
@@ -94,7 +94,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
 	 * the above call, but the caller will drop their reference
 	 * to it. Hence we need to take our own reference.
 	 */
-	drm_gem_object_reference(&obj->obj);
+	drm_gem_object_get(&obj->obj);
 
 	return dfb;
 }
@@ -143,12 +143,12 @@ static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
 		goto err;
 	}
 
-	drm_gem_object_unreference_unlocked(&obj->obj);
+	drm_gem_object_put_unlocked(&obj->obj);
 
 	return &dfb->fb;
 
  err_unref:
-	drm_gem_object_unreference_unlocked(&obj->obj);
+	drm_gem_object_put_unlocked(&obj->obj);
  err:
 	DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
 	return ERR_PTR(ret);
@@ -51,13 +51,13 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
 
 	ret = armada_gem_linear_back(dev, obj);
 	if (ret) {
-		drm_gem_object_unreference_unlocked(&obj->obj);
+		drm_gem_object_put_unlocked(&obj->obj);
 		return ret;
 	}
 
 	ptr = armada_gem_map_object(dev, obj);
 	if (!ptr) {
-		drm_gem_object_unreference_unlocked(&obj->obj);
+		drm_gem_object_put_unlocked(&obj->obj);
 		return -ENOMEM;
 	}
 
@@ -67,7 +67,7 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
 	 * A reference is now held by the framebuffer object if
 	 * successful, otherwise this drops the ref for the error path.
 	 */
-	drm_gem_object_unreference_unlocked(&obj->obj);
+	drm_gem_object_put_unlocked(&obj->obj);
 
 	if (IS_ERR(dfb))
 		return PTR_ERR(dfb);
@@ -265,7 +265,7 @@ int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 	/* drop reference from allocate - handle holds it now */
 	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
  err:
-	drm_gem_object_unreference_unlocked(&dobj->obj);
+	drm_gem_object_put_unlocked(&dobj->obj);
 	return ret;
 }
 
@@ -297,7 +297,7 @@ int armada_gem_create_ioctl(struct drm_device *dev, void *data,
 	/* drop reference from allocate - handle holds it now */
 	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
  err:
-	drm_gem_object_unreference_unlocked(&dobj->obj);
+	drm_gem_object_put_unlocked(&dobj->obj);
 	return ret;
 }
 
@@ -314,13 +314,13 @@ int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 
 	if (!dobj->obj.filp) {
-		drm_gem_object_unreference_unlocked(&dobj->obj);
+		drm_gem_object_put_unlocked(&dobj->obj);
 		return -EINVAL;
 	}
 
 	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
 		       MAP_SHARED, args->offset);
-	drm_gem_object_unreference_unlocked(&dobj->obj);
+	drm_gem_object_put_unlocked(&dobj->obj);
 	if (IS_ERR_VALUE(addr))
 		return addr;
 
@@ -375,7 +375,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	}
 
  unref:
-	drm_gem_object_unreference_unlocked(&dobj->obj);
+	drm_gem_object_put_unlocked(&dobj->obj);
 	return ret;
 }
 
@@ -524,7 +524,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
 			 * Importing our own dmabuf(s) increases the
 			 * refcount on the gem object itself.
 			 */
-			drm_gem_object_reference(obj);
+			drm_gem_object_get(obj);
 			return obj;
 		}
 	}
@@ -177,7 +177,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 	 * Take a reference on the new framebuffer - we want to
 	 * hold on to it while the hardware is displaying it.
 	 */
-	drm_framebuffer_reference(fb);
+	drm_framebuffer_get(fb);
 
 	if (plane->fb)
 		armada_ovl_retire_fb(dplane, plane->fb);
@@ -278,7 +278,7 @@ static int armada_ovl_plane_disable(struct drm_plane *plane,
 
 	fb = xchg(&dplane->old_fb, NULL);
 	if (fb)
-		drm_framebuffer_unreference(fb);
+		drm_framebuffer_put(fb);
 
 	return 0;
 }
@@ -607,10 +607,10 @@ static int adv7511_get_modes(struct adv7511 *adv7511,
 	adv7511_set_config_csc(adv7511, connector, adv7511->rgb,
 			       drm_detect_hdmi_monitor(edid));
 
-	kfree(edid);
-
 	cec_s_phys_addr_from_edid(adv7511->cec_adap, edid);
 
+	kfree(edid);
+
 	return count;
 }
 
@@ -860,6 +860,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 
 	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
 		const struct drm_crtc_helper_funcs *funcs;
+		int ret;
 
 		/* Shut down everything that needs a full modeset. */
 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
@@ -883,6 +884,14 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 			funcs->disable(crtc);
 		else
 			funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+		if (!(dev->irq_enabled && dev->num_crtcs))
+			continue;
+
+		ret = drm_crtc_vblank_get(crtc);
+		WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
+		if (ret == 0)
+			drm_crtc_vblank_put(crtc);
 	}
 }
 
@@ -1772,16 +1781,16 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
 	}
 
 	for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
-		/* commit tracked through new_crtc_state->commit, no need to do it explicitly */
-		if (new_conn_state->crtc)
-			continue;
-
 		/* Userspace is not allowed to get ahead of the previous
 		 * commit with nonblocking ones. */
 		if (nonblock && old_conn_state->commit &&
 		    !try_wait_for_completion(&old_conn_state->commit->flip_done))
 			return -EBUSY;
 
+		/* commit tracked through new_crtc_state->commit, no need to do it explicitly */
+		if (new_conn_state->crtc)
+			continue;
+
 		commit = crtc_or_fake_commit(state, old_conn_state->crtc);
 		if (!commit)
 			return -ENOMEM;
@@ -1790,18 +1799,17 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
 	}
 
 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
-		/*
-		 * Unlike connectors, always track planes explicitly for
-		 * async pageflip support.
-		 */
-
 		/* Userspace is not allowed to get ahead of the previous
 		 * commit with nonblocking ones. */
 		if (nonblock && old_plane_state->commit &&
 		    !try_wait_for_completion(&old_plane_state->commit->flip_done))
 			return -EBUSY;
 
-		commit = crtc_or_fake_commit(state, old_plane_state->crtc);
+		/*
+		 * Unlike connectors, always track planes explicitly for
+		 * async pageflip support.
+		 */
+		commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
 		if (!commit)
 			return -ENOMEM;
 
@@ -112,7 +112,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
 	cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
 				      GFP_KERNEL | __GFP_NOWARN);
 	if (!cma_obj->vaddr) {
-		dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
+		dev_dbg(drm->dev, "failed to allocate buffer with size %zu\n",
 			size);
 		ret = -ENOMEM;
 		goto error;
@@ -1008,6 +1008,10 @@ static const struct panel_desc hitachi_tx23d38vm0caa = {
 		.width = 195,
 		.height = 117,
 	},
+	.delay = {
+		.enable = 160,
+		.disable = 160,
+	},
 };
 
 static const struct drm_display_mode innolux_at043tn24_mode = {
@@ -1018,8 +1022,8 @@ static const struct drm_display_mode innolux_at043tn24_mode = {
 	.htotal = 480 + 2 + 41 + 2,
 	.vdisplay = 272,
 	.vsync_start = 272 + 2,
-	.vsync_end = 272 + 2 + 11,
-	.vtotal = 272 + 2 + 11 + 2,
+	.vsync_end = 272 + 2 + 10,
+	.vtotal = 272 + 2 + 10 + 2,
 	.vrefresh = 60,
 	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
 };
@@ -1033,6 +1037,7 @@ static const struct panel_desc innolux_at043tn24 = {
 		.height = 54,
 	},
 	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
 };
 
 static const struct drm_display_mode innolux_at070tn92_mode = {
@@ -1832,6 +1837,30 @@ static const struct panel_desc tianma_tm070jdhg30 = {
 	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
 };
 
+static const struct drm_display_mode toshiba_lt089ac29000_mode = {
+	.clock = 79500,
+	.hdisplay = 1280,
+	.hsync_start = 1280 + 192,
+	.hsync_end = 1280 + 192 + 128,
+	.htotal = 1280 + 192 + 128 + 64,
+	.vdisplay = 768,
+	.vsync_start = 768 + 20,
+	.vsync_end = 768 + 20 + 7,
+	.vtotal = 768 + 20 + 7 + 3,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc toshiba_lt089ac29000 = {
+	.modes = &toshiba_lt089ac29000_mode,
+	.num_modes = 1,
+	.size = {
+		.width = 194,
+		.height = 116,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
 static const struct drm_display_mode tpk_f07a_0102_mode = {
 	.clock = 33260,
 	.hdisplay = 800,
@@ -2113,6 +2142,9 @@ static const struct of_device_id platform_of_match[] = {
 	}, {
 		.compatible = "tianma,tm070jdhg30",
 		.data = &tianma_tm070jdhg30,
+	}, {
+		.compatible = "toshiba,lt089ac29000",
+		.data = &toshiba_lt089ac29000,
 	}, {
 		.compatible = "tpk,f07a-0102",
 		.data = &tpk_f07a_0102,
@@ -1,23 +1,24 @@
+sun4i-backend-y += sun4i_backend.o sun4i_layer.o
+
 sun4i-drm-y += sun4i_drv.o
 sun4i-drm-y += sun4i_framebuffer.o
 
+sun4i-drm-hdmi-y += sun4i_hdmi_ddc_clk.o
 sun4i-drm-hdmi-y += sun4i_hdmi_enc.o
 sun4i-drm-hdmi-y += sun4i_hdmi_i2c.o
-sun4i-drm-hdmi-y += sun4i_hdmi_ddc_clk.o
 sun4i-drm-hdmi-y += sun4i_hdmi_tmds_clk.o
 
-sun4i-tcon-y += sun4i_tcon.o
-sun4i-tcon-y += sun4i_rgb.o
-sun4i-tcon-y += sun4i_dotclock.o
-sun4i-tcon-y += sun4i_crtc.o
-
-sun4i-backend-y += sun4i_backend.o sun4i_layer.o
-
 sun8i-mixer-y += sun8i_mixer.o sun8i_layer.o
 
-obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o sun4i-tcon.o
-obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o
+sun4i-tcon-y += sun4i_crtc.o
+sun4i-tcon-y += sun4i_dotclock.o
+sun4i-tcon-y += sun4i_tcon.o
+sun4i-tcon-y += sun4i_rgb.o
+
+obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o
+obj-$(CONFIG_DRM_SUN4I) += sun4i-tcon.o
 obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o
+obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o
 
 obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o
 obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o
@@ -20,6 +20,7 @@
 
 #include <linux/component.h>
 #include <linux/list.h>
+#include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/reset.h>
 
@@ -28,6 +29,11 @@
 #include "sun4i_layer.h"
 #include "sunxi_engine.h"
 
+struct sun4i_backend_quirks {
+	/* backend <-> TCON muxing selection done in backend */
+	bool needs_output_muxing;
+};
+
 static const u32 sunxi_rgb2yuv_coef[12] = {
 	0x00000107, 0x00000204, 0x00000064, 0x00000108,
 	0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808,
@@ -216,6 +222,13 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
 	paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
 	DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
 
+	/*
+	 * backend DMA accesses DRAM directly, bypassing the system
+	 * bus. As such, the address range is different and the buffer
+	 * address needs to be corrected.
+	 */
+	paddr -= PHYS_OFFSET;
+
 	/* Write the 32 lower bits of the address (in bits) */
 	lo_paddr = paddr << 3;
 	DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
@@ -338,6 +351,7 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
 	struct drm_device *drm = data;
 	struct sun4i_drv *drv = drm->dev_private;
 	struct sun4i_backend *backend;
+	const struct sun4i_backend_quirks *quirks;
 	struct resource *res;
 	void __iomem *regs;
 	int i, ret;
@@ -432,6 +446,27 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
 			     SUN4I_BACKEND_MODCTL_DEBE_EN |
 			     SUN4I_BACKEND_MODCTL_START_CTL);
 
+	/* Set output selection if needed */
+	quirks = of_device_get_match_data(dev);
+	if (quirks->needs_output_muxing) {
+		/*
+		 * We assume there is no dynamic muxing of backends
+		 * and TCONs, so we select the backend with same ID.
+		 *
+		 * While dynamic selection might be interesting, since
+		 * the CRTC is tied to the TCON, while the layers are
+		 * tied to the backends, this means, we will need to
+		 * switch between groups of layers. There might not be
+		 * a way to represent this constraint in DRM.
+		 */
+		regmap_update_bits(backend->engine.regs,
+				   SUN4I_BACKEND_MODCTL_REG,
+				   SUN4I_BACKEND_MODCTL_OUT_SEL,
+				   (backend->engine.id
+				    ? SUN4I_BACKEND_MODCTL_OUT_LCD1
+				    : SUN4I_BACKEND_MODCTL_OUT_LCD0));
+	}
+
 	return 0;
 
 err_disable_ram_clk:
@@ -479,10 +514,44 @@ static int sun4i_backend_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct sun4i_backend_quirks sun4i_backend_quirks = {
+	.needs_output_muxing = true,
+};
+
+static const struct sun4i_backend_quirks sun5i_backend_quirks = {
+};
+
+static const struct sun4i_backend_quirks sun6i_backend_quirks = {
+};
+
+static const struct sun4i_backend_quirks sun7i_backend_quirks = {
+	.needs_output_muxing = true,
+};
+
+static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
+};
+
 static const struct of_device_id sun4i_backend_of_table[] = {
-	{ .compatible = "allwinner,sun5i-a13-display-backend" },
-	{ .compatible = "allwinner,sun6i-a31-display-backend" },
-	{ .compatible = "allwinner,sun8i-a33-display-backend" },
+	{
+		.compatible = "allwinner,sun4i-a10-display-backend",
+		.data = &sun4i_backend_quirks,
+	},
+	{
+		.compatible = "allwinner,sun5i-a13-display-backend",
+		.data = &sun5i_backend_quirks,
+	},
+	{
+		.compatible = "allwinner,sun6i-a31-display-backend",
+		.data = &sun6i_backend_quirks,
+	},
+	{
+		.compatible = "allwinner,sun7i-a20-display-backend",
+		.data = &sun7i_backend_quirks,
+	},
+	{
+		.compatible = "allwinner,sun8i-a33-display-backend",
+		.data = &sun8i_a33_backend_quirks,
+	},
 	{ }
 };
 MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
@@ -25,7 +25,8 @@
 #define SUN4I_BACKEND_MODCTL_LINE_SEL BIT(29)
 #define SUN4I_BACKEND_MODCTL_ITLMOD_EN BIT(28)
 #define SUN4I_BACKEND_MODCTL_OUT_SEL GENMASK(22, 20)
-#define SUN4I_BACKEND_MODCTL_OUT_LCD (0 << 20)
+#define SUN4I_BACKEND_MODCTL_OUT_LCD0 (0 << 20)
+#define SUN4I_BACKEND_MODCTL_OUT_LCD1 (1 << 20)
 #define SUN4I_BACKEND_MODCTL_OUT_FE0 (6 << 20)
 #define SUN4I_BACKEND_MODCTL_OUT_FE1 (7 << 20)
 #define SUN4I_BACKEND_MODCTL_HWC_EN BIT(16)
@@ -30,6 +30,22 @@
|
|||||||
#include "sunxi_engine.h"
|
#include "sunxi_engine.h"
|
||||||
#include "sun4i_tcon.h"
|
#include "sun4i_tcon.h"
|
||||||
|
|
||||||
|
/*
|
||||||
|
* While this isn't really working in the DRM theory, in practice we
|
||||||
|
* can only ever have one encoder per TCON since we have a mux in our
|
||||||
|
* TCON.
|
||||||
|
*/
|
||||||
|
static struct drm_encoder *sun4i_crtc_get_encoder(struct drm_crtc *crtc)
|
||||||
|
{
|
||||||
|
struct drm_encoder *encoder;
|
||||||
|
|
||||||
|
drm_for_each_encoder(encoder, crtc->dev)
|
||||||
|
if (encoder->crtc == crtc)
|
||||||
|
return encoder;
|
||||||
|
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc,
|
static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc,
|
||||||
struct drm_crtc_state *old_state)
|
struct drm_crtc_state *old_state)
|
||||||
{
|
{
|
||||||
@@ -72,11 +88,12 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
|
|||||||
static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
|
static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
|
||||||
struct drm_crtc_state *old_state)
|
struct drm_crtc_state *old_state)
|
||||||
{
|
{
|
||||||
|
struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc);
|
||||||
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
|
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
|
||||||
|
|
||||||
DRM_DEBUG_DRIVER("Disabling the CRTC\n");
|
DRM_DEBUG_DRIVER("Disabling the CRTC\n");
|
||||||
|
|
||||||
sun4i_tcon_disable(scrtc->tcon);
|
sun4i_tcon_set_status(scrtc->tcon, encoder, false);
|
||||||
|
|
||||||
if (crtc->state->event && !crtc->state->active) {
|
if (crtc->state->event && !crtc->state->active) {
|
||||||
spin_lock_irq(&crtc->dev->event_lock);
|
spin_lock_irq(&crtc->dev->event_lock);
|
||||||
@@ -90,11 +107,21 @@ static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
|
|||||||
static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc,
|
static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc,
|
||||||
struct drm_crtc_state *old_state)
|
struct drm_crtc_state *old_state)
|
||||||
{
|
{
|
||||||
|
struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc);
|
||||||
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
|
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
|
||||||
|
|
||||||
DRM_DEBUG_DRIVER("Enabling the CRTC\n");
|
DRM_DEBUG_DRIVER("Enabling the CRTC\n");
|
||||||
|
|
||||||
sun4i_tcon_enable(scrtc->tcon);
|
sun4i_tcon_set_status(scrtc->tcon, encoder, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc)
|
||||||
|
{
|
||||||
|
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
|
||||||
|
struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc);
|
||||||
|
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
|
||||||
|
|
||||||
|
sun4i_tcon_mode_set(scrtc->tcon, encoder, mode);
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = {
|
static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = {
|
||||||
@@ -102,6 +129,7 @@ static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = {
|
|||||||
.atomic_flush = sun4i_crtc_atomic_flush,
|
.atomic_flush = sun4i_crtc_atomic_flush,
|
||||||
.atomic_enable = sun4i_crtc_atomic_enable,
|
.atomic_enable = sun4i_crtc_atomic_enable,
|
||||||
.atomic_disable = sun4i_crtc_atomic_disable,
|
.atomic_disable = sun4i_crtc_atomic_disable,
|
||||||
|
.mode_set_nofb = sun4i_crtc_mode_set_nofb,
|
||||||
};
|
};
|
||||||
|
|
||||||
static int sun4i_crtc_enable_vblank(struct drm_crtc *crtc)
|
static int sun4i_crtc_enable_vblank(struct drm_crtc *crtc)
|
||||||
|
|||||||
@@ -11,6 +11,7 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
#include <linux/component.h>
|
#include <linux/component.h>
|
||||||
|
#include <linux/kfifo.h>
|
||||||
#include <linux/of_graph.h>
|
#include <linux/of_graph.h>
|
||||||
#include <linux/of_reserved_mem.h>
|
#include <linux/of_reserved_mem.h>
|
||||||
|
|
||||||
@@ -177,16 +178,20 @@ static bool sun4i_drv_node_is_connector(struct device_node *node)
|
|||||||
|
|
||||||
static bool sun4i_drv_node_is_frontend(struct device_node *node)
|
static bool sun4i_drv_node_is_frontend(struct device_node *node)
|
||||||
{
|
{
|
||||||
return of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
|
return of_device_is_compatible(node, "allwinner,sun4i-a10-display-frontend") ||
|
||||||
|
of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
|
||||||
of_device_is_compatible(node, "allwinner,sun6i-a31-display-frontend") ||
|
of_device_is_compatible(node, "allwinner,sun6i-a31-display-frontend") ||
|
||||||
|
of_device_is_compatible(node, "allwinner,sun7i-a20-display-frontend") ||
|
||||||
of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend");
|
of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend");
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool sun4i_drv_node_is_tcon(struct device_node *node)
|
static bool sun4i_drv_node_is_tcon(struct device_node *node)
|
||||||
{
|
{
|
||||||
return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") ||
|
return of_device_is_compatible(node, "allwinner,sun4i-a10-tcon") ||
|
||||||
|
of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") ||
|
||||||
of_device_is_compatible(node, "allwinner,sun6i-a31-tcon") ||
|
of_device_is_compatible(node, "allwinner,sun6i-a31-tcon") ||
|
||||||
of_device_is_compatible(node, "allwinner,sun6i-a31s-tcon") ||
|
of_device_is_compatible(node, "allwinner,sun6i-a31s-tcon") ||
|
||||||
|
of_device_is_compatible(node, "allwinner,sun7i-a20-tcon") ||
|
||||||
of_device_is_compatible(node, "allwinner,sun8i-a33-tcon") ||
|
of_device_is_compatible(node, "allwinner,sun8i-a33-tcon") ||
|
||||||
of_device_is_compatible(node, "allwinner,sun8i-v3s-tcon");
|
of_device_is_compatible(node, "allwinner,sun8i-v3s-tcon");
|
||||||
}
|
}
|
||||||
@@ -222,29 +227,15 @@ static int compare_of(struct device *dev, void *data)
|
|||||||
* matching system handles this for us.
|
* matching system handles this for us.
|
||||||
*/
|
*/
|
||||||
struct endpoint_list {
|
struct endpoint_list {
|
||||||
struct device_node *node;
|
DECLARE_KFIFO(fifo, struct device_node *, 16);
|
||||||
struct list_head list;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static bool node_is_in_list(struct list_head *endpoints,
|
|
||||||
struct device_node *node)
|
|
||||||
{
|
|
||||||
struct endpoint_list *endpoint;
|
|
||||||
|
|
||||||
list_for_each_entry(endpoint, endpoints, list)
|
|
||||||
if (endpoint->node == node)
|
|
||||||
return true;
|
|
||||||
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int sun4i_drv_add_endpoints(struct device *dev,
|
static int sun4i_drv_add_endpoints(struct device *dev,
|
||||||
struct list_head *endpoints,
|
struct endpoint_list *list,
|
||||||
struct component_match **match,
|
struct component_match **match,
|
||||||
struct device_node *node)
|
struct device_node *node)
|
||||||
{
|
{
|
||||||
struct device_node *port, *ep, *remote;
|
struct device_node *port, *ep, *remote;
|
||||||
struct endpoint_list *endpoint;
|
|
||||||
int count = 0;
|
int count = 0;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -304,19 +295,7 @@ static int sun4i_drv_add_endpoints(struct device *dev,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* skip downstream node if it is already in the queue */
|
kfifo_put(&list->fifo, remote);
|
||||||
if (node_is_in_list(endpoints, remote))
|
|
||||||
continue;
|
|
||||||
|
|
||||||
/* Add downstream nodes to the queue */
|
|
||||||
endpoint = kzalloc(sizeof(*endpoint), GFP_KERNEL);
|
|
||||||
if (!endpoint) {
|
|
||||||
of_node_put(remote);
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
|
||||||
|
|
||||||
endpoint->node = remote;
|
|
||||||
list_add_tail(&endpoint->list, endpoints);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return count;
|
return count;
|
||||||
@@ -325,10 +304,11 @@ static int sun4i_drv_add_endpoints(struct device *dev,
|
|||||||
static int sun4i_drv_probe(struct platform_device *pdev)
|
static int sun4i_drv_probe(struct platform_device *pdev)
|
||||||
{
|
{
|
||||||
struct component_match *match = NULL;
|
struct component_match *match = NULL;
|
||||||
struct device_node *np = pdev->dev.of_node;
|
struct device_node *np = pdev->dev.of_node, *endpoint;
|
||||||
struct endpoint_list *endpoint, *endpoint_temp;
|
struct endpoint_list list;
|
||||||
int i, ret, count = 0;
|
int i, ret, count = 0;
|
||||||
LIST_HEAD(endpoints);
|
|
||||||
|
INIT_KFIFO(list.fifo);
|
||||||
|
|
||||||
for (i = 0;; i++) {
|
for (i = 0;; i++) {
|
||||||
struct device_node *pipeline = of_parse_phandle(np,
|
struct device_node *pipeline = of_parse_phandle(np,
|
||||||
@@ -337,31 +317,19 @@ static int sun4i_drv_probe(struct platform_device *pdev)
|
|||||||
if (!pipeline)
|
if (!pipeline)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
endpoint = kzalloc(sizeof(*endpoint), GFP_KERNEL);
|
kfifo_put(&list.fifo, pipeline);
|
||||||
if (!endpoint) {
|
|
||||||
ret = -ENOMEM;
|
|
||||||
goto err_free_endpoints;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
endpoint->node = pipeline;
|
while (kfifo_get(&list.fifo, &endpoint)) {
|
||||||
list_add_tail(&endpoint->list, &endpoints);
|
|
||||||
}
|
|
||||||
|
|
||||||
list_for_each_entry_safe(endpoint, endpoint_temp, &endpoints, list) {
|
|
||||||
/* process this endpoint */
|
/* process this endpoint */
|
||||||
ret = sun4i_drv_add_endpoints(&pdev->dev, &endpoints, &match,
|
ret = sun4i_drv_add_endpoints(&pdev->dev, &list, &match,
|
||||||
endpoint->node);
|
endpoint);
|
||||||
|
|
||||||
/* sun4i_drv_add_endpoints can fail to allocate memory */
|
/* sun4i_drv_add_endpoints can fail to allocate memory */
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto err_free_endpoints;
|
return ret;
|
||||||
|
|
||||||
count += ret;
|
count += ret;
|
||||||
|
|
||||||
/* delete and cleanup the current entry */
|
|
||||||
list_del(&endpoint->list);
|
|
||||||
of_node_put(endpoint->node);
|
|
||||||
kfree(endpoint);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (count)
|
if (count)
|
||||||
@@ -370,15 +338,6 @@ static int sun4i_drv_probe(struct platform_device *pdev)
|
|||||||
match);
|
match);
|
||||||
else
|
else
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
err_free_endpoints:
|
|
||||||
list_for_each_entry_safe(endpoint, endpoint_temp, &endpoints, list) {
|
|
||||||
list_del(&endpoint->list);
|
|
||||||
of_node_put(endpoint->node);
|
|
||||||
kfree(endpoint);
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sun4i_drv_remove(struct platform_device *pdev)
|
static int sun4i_drv_remove(struct platform_device *pdev)
|
||||||
@@ -387,10 +346,12 @@ static int sun4i_drv_remove(struct platform_device *pdev)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static const struct of_device_id sun4i_drv_of_table[] = {
|
static const struct of_device_id sun4i_drv_of_table[] = {
|
||||||
|
{ .compatible = "allwinner,sun4i-a10-display-engine" },
|
||||||
{ .compatible = "allwinner,sun5i-a10s-display-engine" },
|
{ .compatible = "allwinner,sun5i-a10s-display-engine" },
|
||||||
{ .compatible = "allwinner,sun5i-a13-display-engine" },
|
{ .compatible = "allwinner,sun5i-a13-display-engine" },
|
||||||
{ .compatible = "allwinner,sun6i-a31-display-engine" },
|
{ .compatible = "allwinner,sun6i-a31-display-engine" },
|
||||||
{ .compatible = "allwinner,sun6i-a31s-display-engine" },
|
{ .compatible = "allwinner,sun6i-a31s-display-engine" },
|
||||||
|
{ .compatible = "allwinner,sun7i-a20-display-engine" },
|
||||||
{ .compatible = "allwinner,sun8i-a33-display-engine" },
|
{ .compatible = "allwinner,sun8i-a33-display-engine" },
|
||||||
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
|
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
|
||||||
{ }
|
{ }
|
||||||
|
|||||||
@@ -13,7 +13,6 @@
|
|||||||
#include <linux/clk-provider.h>
|
#include <linux/clk-provider.h>
|
||||||
#include <linux/regmap.h>
|
#include <linux/regmap.h>
|
||||||
|
|
||||||
#include "sun4i_tcon.h"
|
|
||||||
#include "sun4i_hdmi.h"
|
#include "sun4i_hdmi.h"
|
||||||
|
|
||||||
struct sun4i_ddc {
|
struct sun4i_ddc {
|
||||||
|
|||||||
@@ -30,7 +30,6 @@
|
|||||||
#include "sun4i_crtc.h"
|
#include "sun4i_crtc.h"
|
||||||
#include "sun4i_drv.h"
|
#include "sun4i_drv.h"
|
||||||
#include "sun4i_hdmi.h"
|
#include "sun4i_hdmi.h"
|
||||||
#include "sun4i_tcon.h"
|
|
||||||
|
|
||||||
static inline struct sun4i_hdmi *
|
static inline struct sun4i_hdmi *
|
||||||
drm_encoder_to_sun4i_hdmi(struct drm_encoder *encoder)
|
drm_encoder_to_sun4i_hdmi(struct drm_encoder *encoder)
|
||||||
@@ -86,8 +85,6 @@ static int sun4i_hdmi_atomic_check(struct drm_encoder *encoder,
|
|||||||
static void sun4i_hdmi_disable(struct drm_encoder *encoder)
|
static void sun4i_hdmi_disable(struct drm_encoder *encoder)
|
||||||
{
|
{
|
||||||
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
|
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
|
||||||
struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
|
|
||||||
struct sun4i_tcon *tcon = crtc->tcon;
|
|
||||||
u32 val;
|
u32 val;
|
||||||
|
|
||||||
DRM_DEBUG_DRIVER("Disabling the HDMI Output\n");
|
DRM_DEBUG_DRIVER("Disabling the HDMI Output\n");
|
||||||
@@ -95,22 +92,16 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder)
|
|||||||
val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
|
val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
|
||||||
val &= ~SUN4I_HDMI_VID_CTRL_ENABLE;
|
val &= ~SUN4I_HDMI_VID_CTRL_ENABLE;
|
||||||
writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
|
writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
|
||||||
|
|
||||||
sun4i_tcon_channel_disable(tcon, 1);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void sun4i_hdmi_enable(struct drm_encoder *encoder)
|
static void sun4i_hdmi_enable(struct drm_encoder *encoder)
|
||||||
{
|
{
|
||||||
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
|
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
|
||||||
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
|
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
|
||||||
struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
|
|
||||||
struct sun4i_tcon *tcon = crtc->tcon;
|
|
||||||
u32 val = 0;
|
u32 val = 0;
|
||||||
|
|
||||||
DRM_DEBUG_DRIVER("Enabling the HDMI Output\n");
|
DRM_DEBUG_DRIVER("Enabling the HDMI Output\n");
|
||||||
|
|
||||||
sun4i_tcon_channel_enable(tcon, 1);
|
|
||||||
|
|
||||||
sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
|
sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
|
||||||
val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
|
val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
|
||||||
val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);
|
val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);
|
||||||
@@ -128,15 +119,9 @@ static void sun4i_hdmi_mode_set(struct drm_encoder *encoder,
|
|||||||
struct drm_display_mode *adjusted_mode)
|
struct drm_display_mode *adjusted_mode)
|
||||||
{
|
{
|
||||||
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
|
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
|
||||||
struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
|
|
||||||
struct sun4i_tcon *tcon = crtc->tcon;
|
|
||||||
unsigned int x, y;
|
unsigned int x, y;
|
||||||
u32 val;
|
u32 val;
|
||||||
|
|
||||||
sun4i_tcon1_mode_set(tcon, mode);
|
|
||||||
sun4i_tcon_set_mux(tcon, 1, encoder);
|
|
||||||
|
|
||||||
clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000);
|
|
||||||
clk_set_rate(hdmi->mod_clk, mode->crtc_clock * 1000);
|
clk_set_rate(hdmi->mod_clk, mode->crtc_clock * 1000);
|
||||||
clk_set_rate(hdmi->tmds_clk, mode->crtc_clock * 1000);
|
clk_set_rate(hdmi->tmds_clk, mode->crtc_clock * 1000);
|
||||||
|
|
||||||
@@ -289,6 +274,58 @@ static const struct cec_pin_ops sun4i_hdmi_cec_pin_ops = {
|
|||||||
#define SUN4I_HDMI_PAD_CTRL1_MASK (GENMASK(24, 7) | GENMASK(5, 0))
|
#define SUN4I_HDMI_PAD_CTRL1_MASK (GENMASK(24, 7) | GENMASK(5, 0))
|
||||||
#define SUN4I_HDMI_PLL_CTRL_MASK (GENMASK(31, 8) | GENMASK(3, 0))
|
#define SUN4I_HDMI_PLL_CTRL_MASK (GENMASK(31, 8) | GENMASK(3, 0))
|
||||||
|
|
||||||
|
/* Only difference from sun5i is AMP is 4 instead of 6 */
|
||||||
|
static const struct sun4i_hdmi_variant sun4i_variant = {
|
||||||
|
.pad_ctrl0_init_val = SUN4I_HDMI_PAD_CTRL0_TXEN |
|
||||||
|
SUN4I_HDMI_PAD_CTRL0_CKEN |
|
||||||
|
SUN4I_HDMI_PAD_CTRL0_PWENG |
|
||||||
|
SUN4I_HDMI_PAD_CTRL0_PWEND |
|
||||||
|
SUN4I_HDMI_PAD_CTRL0_PWENC |
|
||||||
|
SUN4I_HDMI_PAD_CTRL0_LDODEN |
|
||||||
|
SUN4I_HDMI_PAD_CTRL0_LDOCEN |
|
||||||
|
SUN4I_HDMI_PAD_CTRL0_BIASEN,
|
||||||
|
.pad_ctrl1_init_val = SUN4I_HDMI_PAD_CTRL1_REG_AMP(4) |
|
||||||
|
SUN4I_HDMI_PAD_CTRL1_REG_EMP(2) |
|
||||||
|
SUN4I_HDMI_PAD_CTRL1_REG_DENCK |
|
||||||
|
SUN4I_HDMI_PAD_CTRL1_REG_DEN |
|
||||||
|
SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT |
|
||||||
|
SUN4I_HDMI_PAD_CTRL1_EMP_OPT |
|
||||||
|
SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT |
|
||||||
|
SUN4I_HDMI_PAD_CTRL1_AMP_OPT,
|
||||||
|
.pll_ctrl_init_val = SUN4I_HDMI_PLL_CTRL_VCO_S(8) |
|
||||||
|
SUN4I_HDMI_PLL_CTRL_CS(7) |
|
||||||
|
SUN4I_HDMI_PLL_CTRL_CP_S(15) |
|
||||||
|
SUN4I_HDMI_PLL_CTRL_S(7) |
|
||||||
|
SUN4I_HDMI_PLL_CTRL_VCO_GAIN(4) |
|
||||||
|
SUN4I_HDMI_PLL_CTRL_SDIV2 |
|
||||||
|
SUN4I_HDMI_PLL_CTRL_LDO2_EN |
|
||||||
|
SUN4I_HDMI_PLL_CTRL_LDO1_EN |
|
||||||
|
SUN4I_HDMI_PLL_CTRL_HV_IS_33 |
|
||||||
|
SUN4I_HDMI_PLL_CTRL_BWS |
|
||||||
|
SUN4I_HDMI_PLL_CTRL_PLL_EN,
|
||||||
|
|
||||||
|
.ddc_clk_reg = REG_FIELD(SUN4I_HDMI_DDC_CLK_REG, 0, 6),
|
||||||
|
.ddc_clk_pre_divider = 2,
|
||||||
|
.ddc_clk_m_offset = 1,
|
||||||
|
|
||||||
|
.field_ddc_en = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 31, 31),
|
||||||
|
.field_ddc_start = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 30, 30),
|
||||||
|
.field_ddc_reset = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 0, 0),
|
||||||
|
.field_ddc_addr_reg = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 31),
|
||||||
|
.field_ddc_slave_addr = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 6),
|
||||||
|
.field_ddc_int_status = REG_FIELD(SUN4I_HDMI_DDC_INT_STATUS_REG, 0, 8),
|
||||||
|
.field_ddc_fifo_clear = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 31, 31),
|
||||||
|
.field_ddc_fifo_rx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 4, 7),
|
||||||
|
.field_ddc_fifo_tx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 0, 3),
|
||||||
|
.field_ddc_byte_count = REG_FIELD(SUN4I_HDMI_DDC_BYTE_COUNT_REG, 0, 9),
|
||||||
|
.field_ddc_cmd = REG_FIELD(SUN4I_HDMI_DDC_CMD_REG, 0, 2),
|
||||||
|
.field_ddc_sda_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 9, 9),
|
||||||
|
.field_ddc_sck_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 8, 8),
|
||||||
|
|
||||||
|
.ddc_fifo_reg = SUN4I_HDMI_DDC_FIFO_DATA_REG,
|
||||||
|
.ddc_fifo_has_dir = true,
|
||||||
|
};
|
||||||
|
|
||||||
static const struct sun4i_hdmi_variant sun5i_variant = {
|
static const struct sun4i_hdmi_variant sun5i_variant = {
|
||||||
.pad_ctrl0_init_val = SUN4I_HDMI_PAD_CTRL0_TXEN |
|
.pad_ctrl0_init_val = SUN4I_HDMI_PAD_CTRL0_TXEN |
|
||||||
SUN4I_HDMI_PAD_CTRL0_CKEN |
|
SUN4I_HDMI_PAD_CTRL0_CKEN |
|
||||||
@@ -613,6 +650,7 @@ static int sun4i_hdmi_remove(struct platform_device *pdev)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static const struct of_device_id sun4i_hdmi_of_table[] = {
|
static const struct of_device_id sun4i_hdmi_of_table[] = {
|
||||||
|
{ .compatible = "allwinner,sun4i-a10-hdmi", .data = &sun4i_variant, },
|
||||||
{ .compatible = "allwinner,sun5i-a10s-hdmi", .data = &sun5i_variant, },
|
{ .compatible = "allwinner,sun5i-a10s-hdmi", .data = &sun5i_variant, },
|
||||||
{ .compatible = "allwinner,sun6i-a31-hdmi", .data = &sun6i_variant, },
|
{ .compatible = "allwinner,sun6i-a31-hdmi", .data = &sun6i_variant, },
|
||||||
{ }
|
{ }
|
||||||
|
|||||||
@@ -12,7 +12,6 @@
|
|||||||
|
|
||||||
#include <linux/clk-provider.h>
|
#include <linux/clk-provider.h>
|
||||||
|
|
||||||
#include "sun4i_tcon.h"
|
|
||||||
#include "sun4i_hdmi.h"
|
#include "sun4i_hdmi.h"
|
||||||
|
|
||||||
struct sun4i_tmds {
|
struct sun4i_tmds {
|
||||||
|
|||||||
@@ -134,13 +134,10 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
 
         DRM_DEBUG_DRIVER("Enabling RGB output\n");
 
-        if (!IS_ERR(tcon->panel))
+        if (!IS_ERR(tcon->panel)) {
                 drm_panel_prepare(tcon->panel);
-
-        sun4i_tcon_channel_enable(tcon, 0);
-
-        if (!IS_ERR(tcon->panel))
                 drm_panel_enable(tcon->panel);
+        }
 }
 
 static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)

@@ -150,31 +147,13 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
 
         DRM_DEBUG_DRIVER("Disabling RGB output\n");
 
-        if (!IS_ERR(tcon->panel))
+        if (!IS_ERR(tcon->panel)) {
                 drm_panel_disable(tcon->panel);
-
-        sun4i_tcon_channel_disable(tcon, 0);
-
-        if (!IS_ERR(tcon->panel))
                 drm_panel_unprepare(tcon->panel);
-}
-
-static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
-                                       struct drm_display_mode *mode,
-                                       struct drm_display_mode *adjusted_mode)
-{
-        struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
-        struct sun4i_tcon *tcon = rgb->tcon;
-
-        sun4i_tcon0_mode_set(tcon, mode);
-        sun4i_tcon_set_mux(tcon, 0, encoder);
-
-        /* FIXME: This seems to be board specific */
-        clk_set_phase(tcon->dclk, 120);
+        }
 }
 
 static struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = {
-        .mode_set = sun4i_rgb_encoder_mode_set,
         .disable = sun4i_rgb_encoder_disable,
         .enable = sun4i_rgb_encoder_enable,
 };
@@ -35,66 +35,61 @@
 #include "sun4i_tcon.h"
 #include "sunxi_engine.h"
 
-void sun4i_tcon_disable(struct sun4i_tcon *tcon)
+static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
+                                          bool enabled)
 {
-        DRM_DEBUG_DRIVER("Disabling TCON\n");
-
-        /* Disable the TCON */
-        regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
-                           SUN4I_TCON_GCTL_TCON_ENABLE, 0);
-}
-EXPORT_SYMBOL(sun4i_tcon_disable);
-
-void sun4i_tcon_enable(struct sun4i_tcon *tcon)
-{
-        DRM_DEBUG_DRIVER("Enabling TCON\n");
-
-        /* Enable the TCON */
-        regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
-                           SUN4I_TCON_GCTL_TCON_ENABLE,
-                           SUN4I_TCON_GCTL_TCON_ENABLE);
-}
-EXPORT_SYMBOL(sun4i_tcon_enable);
-
-void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel)
-{
-        DRM_DEBUG_DRIVER("Disabling TCON channel %d\n", channel);
-
-        /* Disable the TCON's channel */
-        if (channel == 0) {
-                regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
-                                   SUN4I_TCON0_CTL_TCON_ENABLE, 0);
-                clk_disable_unprepare(tcon->dclk);
-                return;
-        }
-
-        WARN_ON(!tcon->quirks->has_channel_1);
-        regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
-                           SUN4I_TCON1_CTL_TCON_ENABLE, 0);
-        clk_disable_unprepare(tcon->sclk1);
-}
-EXPORT_SYMBOL(sun4i_tcon_channel_disable);
-
-void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel)
-{
-        DRM_DEBUG_DRIVER("Enabling TCON channel %d\n", channel);
-
-        /* Enable the TCON's channel */
-        if (channel == 0) {
+        struct clk *clk;
+
+        switch (channel) {
+        case 0:
                 regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
                                    SUN4I_TCON0_CTL_TCON_ENABLE,
-                                   SUN4I_TCON0_CTL_TCON_ENABLE);
-                clk_prepare_enable(tcon->dclk);
-                return;
-        }
+                                   enabled ? SUN4I_TCON0_CTL_TCON_ENABLE : 0);
+                clk = tcon->dclk;
+                break;
+        case 1:
+                WARN_ON(!tcon->quirks->has_channel_1);
+                regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+                                   SUN4I_TCON1_CTL_TCON_ENABLE,
+                                   enabled ? SUN4I_TCON1_CTL_TCON_ENABLE : 0);
+                clk = tcon->sclk1;
+                break;
+        default:
+                DRM_WARN("Unknown channel... doing nothing\n");
+                return;
+        }
 
-        WARN_ON(!tcon->quirks->has_channel_1);
-        regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
-                           SUN4I_TCON1_CTL_TCON_ENABLE,
-                           SUN4I_TCON1_CTL_TCON_ENABLE);
-        clk_prepare_enable(tcon->sclk1);
+        if (enabled)
+                clk_prepare_enable(clk);
+        else
+                clk_disable_unprepare(clk);
+}
+
+void sun4i_tcon_set_status(struct sun4i_tcon *tcon,
+                           const struct drm_encoder *encoder,
+                           bool enabled)
+{
+        int channel;
+
+        switch (encoder->encoder_type) {
+        case DRM_MODE_ENCODER_NONE:
+                channel = 0;
+                break;
+        case DRM_MODE_ENCODER_TMDS:
+        case DRM_MODE_ENCODER_TVDAC:
+                channel = 1;
+                break;
+        default:
+                DRM_DEBUG_DRIVER("Unknown encoder type, doing nothing...\n");
+                return;
+        }
+
+        regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
+                           SUN4I_TCON_GCTL_TCON_ENABLE,
+                           enabled ? SUN4I_TCON_GCTL_TCON_ENABLE : 0);
+
+        sun4i_tcon_channel_set_status(tcon, channel, enabled);
 }
-EXPORT_SYMBOL(sun4i_tcon_channel_enable);
 
 void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable)
 {
@@ -134,7 +129,7 @@ static struct sun4i_tcon *sun4i_get_tcon0(struct drm_device *drm)
 }
 
 void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
-                        struct drm_encoder *encoder)
+                        const struct drm_encoder *encoder)
 {
         int ret = -ENOTSUPP;
 
@@ -144,9 +139,8 @@ void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
         DRM_DEBUG_DRIVER("Muxing encoder %s to CRTC %s: %d\n",
                          encoder->name, encoder->crtc->name, ret);
 }
-EXPORT_SYMBOL(sun4i_tcon_set_mux);
 
-static int sun4i_tcon_get_clk_delay(struct drm_display_mode *mode,
+static int sun4i_tcon_get_clk_delay(const struct drm_display_mode *mode,
                                     int channel)
 {
         int delay = mode->vtotal - mode->vdisplay;

@@ -164,15 +158,26 @@ static int sun4i_tcon_get_clk_delay(struct drm_display_mode *mode,
         return delay;
 }
 
-void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
-                          struct drm_display_mode *mode)
+static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
+                                        const struct drm_display_mode *mode)
+{
+        /* Configure the dot clock */
+        clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
+
+        /* Set the resolution */
+        regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
+                     SUN4I_TCON0_BASIC0_X(mode->crtc_hdisplay) |
+                     SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay));
+}
+
+static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
+                                     const struct drm_display_mode *mode)
 {
         unsigned int bp, hsync, vsync;
         u8 clk_delay;
         u32 val = 0;
 
-        /* Configure the dot clock */
-        clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
+        sun4i_tcon0_mode_set_common(tcon, mode);
 
         /* Adjust clock delay */
         clk_delay = sun4i_tcon_get_clk_delay(mode, 0);

@@ -180,11 +185,6 @@ void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
                            SUN4I_TCON0_CTL_CLK_DELAY_MASK,
                            SUN4I_TCON0_CTL_CLK_DELAY(clk_delay));
 
-        /* Set the resolution */
-        regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
-                     SUN4I_TCON0_BASIC0_X(mode->crtc_hdisplay) |
-                     SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay));
-
         /*
          * This is called a backporch in the register documentation,
          * but it really is the back porch + hsync

@@ -238,10 +238,9 @@ void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
         /* Enable the output on the pins */
         regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0);
 }
-EXPORT_SYMBOL(sun4i_tcon0_mode_set);
 
-void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
-                          struct drm_display_mode *mode)
+static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
+                                 const struct drm_display_mode *mode)
 {
         unsigned int bp, hsync, vsync, vtotal;
         u8 clk_delay;

@@ -329,7 +328,26 @@ void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
                            SUN4I_TCON_GCTL_IOMAP_MASK,
                            SUN4I_TCON_GCTL_IOMAP_TCON1);
 }
-EXPORT_SYMBOL(sun4i_tcon1_mode_set);
+
+void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
+                         const struct drm_encoder *encoder,
+                         const struct drm_display_mode *mode)
+{
+        switch (encoder->encoder_type) {
+        case DRM_MODE_ENCODER_NONE:
+                sun4i_tcon0_mode_set_rgb(tcon, mode);
+                sun4i_tcon_set_mux(tcon, 0, encoder);
+                break;
+        case DRM_MODE_ENCODER_TVDAC:
+        case DRM_MODE_ENCODER_TMDS:
+                sun4i_tcon1_mode_set(tcon, mode);
+                sun4i_tcon_set_mux(tcon, 1, encoder);
+                break;
+        default:
+                DRM_DEBUG_DRIVER("Unknown encoder type, doing nothing...\n");
+        }
+}
+EXPORT_SYMBOL(sun4i_tcon_mode_set);
 
 static void sun4i_tcon_finish_page_flip(struct drm_device *dev,
                                         struct sun4i_crtc *scrtc)
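With the two entry points added above, an encoder no longer drives the TCON channel and mode registers itself; it hands the whole encoder to the TCON code. A rough sketch of what an encoder's hooks reduce to under the new API (example_encoder_to_tcon() is a hypothetical lookup helper, not part of this series):

    static void example_encoder_enable(struct drm_encoder *encoder)
    {
            struct sun4i_tcon *tcon = example_encoder_to_tcon(encoder);

            /* Turns on the global TCON enable bit and the right channel. */
            sun4i_tcon_set_status(tcon, encoder, true);
    }

    static void example_encoder_mode_set(struct drm_encoder *encoder,
                                         struct drm_display_mode *mode,
                                         struct drm_display_mode *adjusted_mode)
    {
            struct sun4i_tcon *tcon = example_encoder_to_tcon(encoder);

            /* Picks channel 0 or 1 and the mux setting from encoder_type. */
            sun4i_tcon_mode_set(tcon, encoder, mode);
    }
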
@@ -782,8 +800,32 @@ static int sun4i_tcon_remove(struct platform_device *pdev)
 }
 
 /* platform specific TCON muxing callbacks */
+static int sun4i_a10_tcon_set_mux(struct sun4i_tcon *tcon,
+                                  const struct drm_encoder *encoder)
+{
+        struct sun4i_tcon *tcon0 = sun4i_get_tcon0(encoder->dev);
+        u32 shift;
+
+        if (!tcon0)
+                return -EINVAL;
+
+        switch (encoder->encoder_type) {
+        case DRM_MODE_ENCODER_TMDS:
+                /* HDMI */
+                shift = 8;
+                break;
+        default:
+                return -EINVAL;
+        }
+
+        regmap_update_bits(tcon0->regs, SUN4I_TCON_MUX_CTRL_REG,
+                           0x3 << shift, tcon->id << shift);
+
+        return 0;
+}
+
 static int sun5i_a13_tcon_set_mux(struct sun4i_tcon *tcon,
-                                  struct drm_encoder *encoder)
+                                  const struct drm_encoder *encoder)
 {
         u32 val;
 
@@ -799,7 +841,7 @@ static int sun5i_a13_tcon_set_mux(struct sun4i_tcon *tcon,
 }
 
 static int sun6i_tcon_set_mux(struct sun4i_tcon *tcon,
-                              struct drm_encoder *encoder)
+                              const struct drm_encoder *encoder)
 {
         struct sun4i_tcon *tcon0 = sun4i_get_tcon0(encoder->dev);
         u32 shift;

@@ -823,6 +865,11 @@ static int sun6i_tcon_set_mux(struct sun4i_tcon *tcon,
         return 0;
 }
 
+static const struct sun4i_tcon_quirks sun4i_a10_quirks = {
+        .has_channel_1 = true,
+        .set_mux = sun4i_a10_tcon_set_mux,
+};
+
 static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
         .has_channel_1 = true,
         .set_mux = sun5i_a13_tcon_set_mux,

@@ -839,6 +886,12 @@ static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
         .needs_de_be_mux = true,
 };
 
+static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
+        .has_channel_1 = true,
+        /* Same display pipeline structure as A10 */
+        .set_mux = sun4i_a10_tcon_set_mux,
+};
+
 static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
         /* nothing is supported */
 };

@@ -848,9 +901,11 @@ static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
 };
 
 static const struct of_device_id sun4i_tcon_of_table[] = {
+        { .compatible = "allwinner,sun4i-a10-tcon", .data = &sun4i_a10_quirks },
         { .compatible = "allwinner,sun5i-a13-tcon", .data = &sun5i_a13_quirks },
         { .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks },
         { .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks },
+        { .compatible = "allwinner,sun7i-a20-tcon", .data = &sun7i_a20_quirks },
         { .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
         { .compatible = "allwinner,sun8i-v3s-tcon", .data = &sun8i_v3s_quirks },
         { }
@@ -152,7 +152,7 @@ struct sun4i_tcon_quirks {
         bool needs_de_be_mux; /* sun6i needs mux to select backend */
 
         /* callback to handle tcon muxing options */
-        int (*set_mux)(struct sun4i_tcon *, struct drm_encoder *);
+        int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
 };
 
 struct sun4i_tcon {

@@ -190,22 +190,11 @@ struct sun4i_tcon {
 struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node);
 struct drm_panel *sun4i_tcon_find_panel(struct device_node *node);
 
-/* Global Control */
-void sun4i_tcon_disable(struct sun4i_tcon *tcon);
-void sun4i_tcon_enable(struct sun4i_tcon *tcon);
-
-/* Channel Control */
-void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel);
-void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel);
-
 void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable);
-
-/* Mode Related Controls */
-void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
-                        struct drm_encoder *encoder);
-void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
-                          struct drm_display_mode *mode);
-void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
-                          struct drm_display_mode *mode);
+void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
+                         const struct drm_encoder *encoder,
+                         const struct drm_display_mode *mode);
+void sun4i_tcon_set_status(struct sun4i_tcon *crtc,
+                           const struct drm_encoder *encoder, bool enable);
 
 #endif /* __SUN4I_TCON_H__ */
@@ -24,7 +24,6 @@
 
 #include "sun4i_crtc.h"
 #include "sun4i_drv.h"
-#include "sun4i_tcon.h"
 #include "sunxi_engine.h"
 
 #define SUN4I_TVE_EN_REG 0x000

@@ -345,12 +344,9 @@ static void sun4i_tv_disable(struct drm_encoder *encoder)
 {
         struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
         struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
-        struct sun4i_tcon *tcon = crtc->tcon;
 
         DRM_DEBUG_DRIVER("Disabling the TV Output\n");
 
-        sun4i_tcon_channel_disable(tcon, 1);
-
         regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
                            SUN4I_TVE_EN_ENABLE,
                            0);

@@ -362,7 +358,6 @@ static void sun4i_tv_enable(struct drm_encoder *encoder)
 {
         struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
         struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
-        struct sun4i_tcon *tcon = crtc->tcon;
 
         DRM_DEBUG_DRIVER("Enabling the TV Output\n");
 

@@ -371,8 +366,6 @@ static void sun4i_tv_enable(struct drm_encoder *encoder)
         regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
                            SUN4I_TVE_EN_ENABLE,
                            SUN4I_TVE_EN_ENABLE);
-
-        sun4i_tcon_channel_enable(tcon, 1);
 }
 
 static void sun4i_tv_mode_set(struct drm_encoder *encoder,

@@ -380,13 +373,8 @@ static void sun4i_tv_mode_set(struct drm_encoder *encoder,
                               struct drm_display_mode *adjusted_mode)
 {
         struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
-        struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
-        struct sun4i_tcon *tcon = crtc->tcon;
         const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
 
-        sun4i_tcon1_mode_set(tcon, mode);
-        sun4i_tcon_set_mux(tcon, 1, encoder);
-
         /* Enable and map the DAC to the output */
         regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
                            SUN4I_TVE_EN_DAC_MAP_MASK,
@@ -14,70 +14,95 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_crtc_helper.h>
+#include "udl_connector.h"
 #include "udl_drv.h"
 
-/* dummy connector to just get EDID,
-   all UDL appear to have a DVI-D */
-
-static u8 *udl_get_edid(struct udl_device *udl)
+static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
+                               u8 *buff)
 {
-        u8 *block;
-        char *rbuf;
         int ret, i;
+        u8 *read_buff;
 
-        block = kmalloc(EDID_LENGTH, GFP_KERNEL);
-        if (block == NULL)
-                return NULL;
-
-        rbuf = kmalloc(2, GFP_KERNEL);
-        if (rbuf == NULL)
-                goto error;
+        read_buff = kmalloc(2, GFP_KERNEL);
+        if (!read_buff)
+                return false;
 
         for (i = 0; i < EDID_LENGTH; i++) {
+                int bval = (i + block_idx * EDID_LENGTH) << 8;
                 ret = usb_control_msg(udl->udev,
-                                      usb_rcvctrlpipe(udl->udev, 0), (0x02),
-                                      (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
-                                      HZ);
+                                      usb_rcvctrlpipe(udl->udev, 0),
+                                      (0x02), (0x80 | (0x02 << 5)), bval,
+                                      0xA1, read_buff, 2, HZ);
                 if (ret < 1) {
                         DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
-                        goto error;
+                        kfree(read_buff);
+                        return false;
                 }
-                block[i] = rbuf[1];
+                buff[i] = read_buff[1];
         }
 
-        kfree(rbuf);
-        return block;
+        kfree(read_buff);
+        return true;
+}
+
+static bool udl_get_edid(struct udl_device *udl, u8 **result_buff,
+                         int *result_buff_size)
+{
+        int i, extensions;
+        u8 *block_buff = NULL, *buff_ptr;
 
-error:
-        kfree(block);
-        kfree(rbuf);
-        return NULL;
+        block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL);
+        if (block_buff == NULL)
+                return false;
+
+        if (udl_get_edid_block(udl, 0, block_buff) &&
+            memchr_inv(block_buff, 0, EDID_LENGTH)) {
+                extensions = ((struct edid *)block_buff)->extensions;
+                if (extensions > 0) {
+                        /* we have to read all extensions one by one */
+                        *result_buff_size = EDID_LENGTH * (extensions + 1);
+                        *result_buff = kmalloc(*result_buff_size, GFP_KERNEL);
+                        buff_ptr = *result_buff;
+                        if (buff_ptr == NULL) {
+                                kfree(block_buff);
+                                return false;
+                        }
+                        memcpy(buff_ptr, block_buff, EDID_LENGTH);
+                        kfree(block_buff);
+                        buff_ptr += EDID_LENGTH;
+                        for (i = 1; i < extensions; ++i) {
+                                if (udl_get_edid_block(udl, i, buff_ptr)) {
+                                        buff_ptr += EDID_LENGTH;
+                                } else {
+                                        kfree(*result_buff);
+                                        *result_buff = NULL;
+                                        return false;
+                                }
+                        }
+                        return true;
+                }
+                /* we have only base edid block */
+                *result_buff = block_buff;
+                *result_buff_size = EDID_LENGTH;
+                return true;
+        }
+
+        kfree(block_buff);
+
+        return false;
 }
 
 static int udl_get_modes(struct drm_connector *connector)
 {
-        struct udl_device *udl = connector->dev->dev_private;
-        struct edid *edid;
-        int ret;
-
-        edid = (struct edid *)udl_get_edid(udl);
-        if (!edid) {
-                drm_mode_connector_update_edid_property(connector, NULL);
-                return 0;
-        }
-
-        /*
-         * We only read the main block, but if the monitor reports extension
-         * blocks then the drm edid code expects them to be present, so patch
-         * the extension count to 0.
-         */
-        edid->checksum += edid->extensions;
-        edid->extensions = 0;
-
-        drm_mode_connector_update_edid_property(connector, edid);
-        ret = drm_add_edid_modes(connector, edid);
-        kfree(edid);
-        return ret;
+        struct udl_drm_connector *udl_connector =
+                                        container_of(connector,
+                                        struct udl_drm_connector,
+                                        connector);
+
+        drm_mode_connector_update_edid_property(connector, udl_connector->edid);
+        if (udl_connector->edid)
+                return drm_add_edid_modes(connector, udl_connector->edid);
+        return 0;
 }
 
 static int udl_mode_valid(struct drm_connector *connector,
@@ -96,8 +121,26 @@ static int udl_mode_valid(struct drm_connector *connector,
 static enum drm_connector_status
 udl_detect(struct drm_connector *connector, bool force)
 {
-        if (drm_dev_is_unplugged(connector->dev))
+        u8 *edid_buff = NULL;
+        int edid_buff_size = 0;
+        struct udl_device *udl = connector->dev->dev_private;
+        struct udl_drm_connector *udl_connector =
+                                        container_of(connector,
+                                        struct udl_drm_connector,
+                                        connector);
+
+        /* cleanup previous edid */
+        if (udl_connector->edid != NULL) {
+                kfree(udl_connector->edid);
+                udl_connector->edid = NULL;
+        }
+
+        if (!udl_get_edid(udl, &edid_buff, &edid_buff_size))
                 return connector_status_disconnected;
+
+        udl_connector->edid = (struct edid *)edid_buff;
+
         return connector_status_connected;
 }
 
@@ -117,8 +160,14 @@ static int udl_connector_set_property(struct drm_connector *connector,
 
 static void udl_connector_destroy(struct drm_connector *connector)
 {
+        struct udl_drm_connector *udl_connector =
+                                        container_of(connector,
+                                        struct udl_drm_connector,
+                                        connector);
+
         drm_connector_unregister(connector);
         drm_connector_cleanup(connector);
+        kfree(udl_connector->edid);
         kfree(connector);
 }
 
@@ -138,17 +187,22 @@ static const struct drm_connector_funcs udl_connector_funcs = {
 
 int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
 {
+        struct udl_drm_connector *udl_connector;
         struct drm_connector *connector;
 
-        connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
-        if (!connector)
+        udl_connector = kzalloc(sizeof(struct udl_drm_connector), GFP_KERNEL);
+        if (!udl_connector)
                 return -ENOMEM;
 
-        drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_DVII);
+        connector = &udl_connector->connector;
+        drm_connector_init(dev, connector, &udl_connector_funcs,
+                           DRM_MODE_CONNECTOR_DVII);
         drm_connector_helper_add(connector, &udl_connector_helper_funcs);
 
         drm_connector_register(connector);
         drm_mode_connector_attach_encoder(connector, encoder);
+        connector->polled = DRM_CONNECTOR_POLL_HPD |
+                DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
 
         return 0;
 }

 drivers/gpu/drm/udl/udl_connector.h (new file, 13 lines)
@@ -0,0 +1,13 @@
+#ifndef __UDL_CONNECTOR_H__
+#define __UDL_CONNECTOR_H__
+
+#include <drm/drm_crtc.h>
+
+struct udl_drm_connector {
+        struct drm_connector connector;
+        /* last udl_detect edid */
+        struct edid *edid;
+};
+
+
+#endif //__UDL_CONNECTOR_H__
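The new header embeds the core drm_connector inside a driver-private wrapper so the cached EDID lives and dies with the connector: udl_detect() refreshes it on every poll, udl_get_modes() only consumes it, and udl_connector_destroy() frees it. A minimal sketch of the recovery pattern the callbacks above rely on (to_udl_connector() is an illustrative helper name, not one defined by this patch):

    static inline struct udl_drm_connector *
    to_udl_connector(struct drm_connector *connector)
    {
            /* Walk back from the embedded member to the containing struct. */
            return container_of(connector, struct udl_drm_connector, connector);
    }
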
@@ -14,6 +14,9 @@
 static int udl_usb_suspend(struct usb_interface *interface,
                            pm_message_t message)
 {
+        struct drm_device *dev = usb_get_intfdata(interface);
+
+        drm_kms_helper_poll_disable(dev);
         return 0;
 }
 
@@ -21,6 +24,7 @@ static int udl_usb_resume(struct usb_interface *interface)
 {
         struct drm_device *dev = usb_get_intfdata(interface);
 
+        drm_kms_helper_poll_enable(dev);
         udl_modeset_restore(dev);
         return 0;
 }

@@ -11,6 +11,7 @@
  * more details.
  */
 #include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
 #include "udl_drv.h"
 
 /* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */

@@ -350,6 +351,8 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
         if (ret)
                 goto err_fb;
 
+        drm_kms_helper_poll_init(dev);
+
         return 0;
 err_fb:
         udl_fbdev_cleanup(dev);

@@ -371,6 +374,8 @@ void udl_driver_unload(struct drm_device *dev)
 {
         struct udl_device *udl = dev->dev_private;
 
+        drm_kms_helper_poll_fini(dev);
+
         if (udl->urbs.count)
                 udl_free_urb_list(dev);
 
@@ -53,6 +53,17 @@ static void vc4_bo_stats_dump(struct vc4_dev *vc4)
                          vc4->bo_labels[i].size_allocated / 1024,
                          vc4->bo_labels[i].num_allocated);
         }
+
+        mutex_lock(&vc4->purgeable.lock);
+        if (vc4->purgeable.num)
+                DRM_INFO("%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
+                         vc4->purgeable.size / 1024, vc4->purgeable.num);
+
+        if (vc4->purgeable.purged_num)
+                DRM_INFO("%30s: %6zdkb BOs (%d)\n", "total purged BO",
+                         vc4->purgeable.purged_size / 1024,
+                         vc4->purgeable.purged_num);
+        mutex_unlock(&vc4->purgeable.lock);
 }
 
 #ifdef CONFIG_DEBUG_FS

@@ -75,6 +86,17 @@ int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
         }
         mutex_unlock(&vc4->bo_lock);
 
+        mutex_lock(&vc4->purgeable.lock);
+        if (vc4->purgeable.num)
+                seq_printf(m, "%30s: %6dkb BOs (%d)\n", "userspace BO cache",
+                           vc4->purgeable.size / 1024, vc4->purgeable.num);
+
+        if (vc4->purgeable.purged_num)
+                seq_printf(m, "%30s: %6dkb BOs (%d)\n", "total purged BO",
+                           vc4->purgeable.purged_size / 1024,
+                           vc4->purgeable.purged_num);
+        mutex_unlock(&vc4->purgeable.lock);
+
         return 0;
 }
 #endif

@@ -247,6 +269,109 @@ static void vc4_bo_cache_purge(struct drm_device *dev)
         mutex_unlock(&vc4->bo_lock);
 }
 
+void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
+{
+        struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+
+        mutex_lock(&vc4->purgeable.lock);
+        list_add_tail(&bo->size_head, &vc4->purgeable.list);
+        vc4->purgeable.num++;
+        vc4->purgeable.size += bo->base.base.size;
+        mutex_unlock(&vc4->purgeable.lock);
+}
+
+static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
+{
+        struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+
+        /* list_del_init() is used here because the caller might release
+         * the purgeable lock in order to acquire the madv one and update the
+         * madv status.
+         * During this short period of time a user might decide to mark
+         * the BO as unpurgeable, and if bo->madv is set to
+         * VC4_MADV_DONTNEED it will try to remove the BO from the
+         * purgeable list which will fail if the ->next/prev fields
+         * are set to LIST_POISON1/LIST_POISON2 (which is what
+         * list_del() does).
+         * Re-initializing the list element guarantees that list_del()
+         * will work correctly even if it's a NOP.
+         */
+        list_del_init(&bo->size_head);
+        vc4->purgeable.num--;
+        vc4->purgeable.size -= bo->base.base.size;
+}
+
+void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
+{
+        struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+
+        mutex_lock(&vc4->purgeable.lock);
+        vc4_bo_remove_from_purgeable_pool_locked(bo);
+        mutex_unlock(&vc4->purgeable.lock);
+}
+
+static void vc4_bo_purge(struct drm_gem_object *obj)
+{
+        struct vc4_bo *bo = to_vc4_bo(obj);
+        struct drm_device *dev = obj->dev;
+
+        WARN_ON(!mutex_is_locked(&bo->madv_lock));
+        WARN_ON(bo->madv != VC4_MADV_DONTNEED);
+
+        drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
+
+        dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
+        bo->base.vaddr = NULL;
+        bo->madv = __VC4_MADV_PURGED;
+}
+
+static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
+{
+        struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+        mutex_lock(&vc4->purgeable.lock);
+        while (!list_empty(&vc4->purgeable.list)) {
+                struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
+                                                     struct vc4_bo, size_head);
+                struct drm_gem_object *obj = &bo->base.base;
+                size_t purged_size = 0;
+
+                vc4_bo_remove_from_purgeable_pool_locked(bo);
+
+                /* Release the purgeable lock while we're purging the BO so
+                 * that other people can continue inserting things in the
+                 * purgeable pool without having to wait for all BOs to be
+                 * purged.
+                 */
+                mutex_unlock(&vc4->purgeable.lock);
+                mutex_lock(&bo->madv_lock);
+
+                /* Since we released the purgeable pool lock before acquiring
+                 * the BO madv one, the user may have marked the BO as WILLNEED
+                 * and re-used it in the meantime.
+                 * Before purging the BO we need to make sure
+                 * - it is still marked as DONTNEED
+                 * - it has not been re-inserted in the purgeable list
+                 * - it is not used by HW blocks
+                 * If one of these conditions is not met, just skip the entry.
+                 */
+                if (bo->madv == VC4_MADV_DONTNEED &&
+                    list_empty(&bo->size_head) &&
+                    !refcount_read(&bo->usecnt)) {
+                        purged_size = bo->base.base.size;
+                        vc4_bo_purge(obj);
+                }
+                mutex_unlock(&bo->madv_lock);
+                mutex_lock(&vc4->purgeable.lock);
+
+                if (purged_size) {
+                        vc4->purgeable.purged_size += purged_size;
+                        vc4->purgeable.purged_num++;
+                }
+        }
+        mutex_unlock(&vc4->purgeable.lock);
+}
+
 static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
                                             uint32_t size,
                                             enum vc4_kernel_bo_type type)
@@ -293,6 +418,9 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
         if (!bo)
                 return ERR_PTR(-ENOMEM);
 
+        bo->madv = VC4_MADV_WILLNEED;
+        refcount_set(&bo->usecnt, 0);
+        mutex_init(&bo->madv_lock);
         mutex_lock(&vc4->bo_lock);
         bo->label = VC4_BO_TYPE_KERNEL;
         vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;

@@ -330,16 +458,38 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
                  * CMA allocations we've got laying around and try again.
                  */
                 vc4_bo_cache_purge(dev);
-
                 cma_obj = drm_gem_cma_create(dev, size);
+        }
+
+        if (IS_ERR(cma_obj)) {
+                /*
+                 * Still not enough CMA memory, purge the userspace BO
+                 * cache and retry.
+                 * This is sub-optimal since we purge the whole userspace
+                 * BO cache which forces users that want to re-use the BO to
+                 * restore its initial content.
+                 * Ideally, we should purge entries one by one and retry
+                 * after each to see if CMA allocation succeeds. Or even
+                 * better, try to find an entry with at least the same
+                 * size.
+                 */
+                vc4_bo_userspace_cache_purge(dev);
+                cma_obj = drm_gem_cma_create(dev, size);
+        }
 
         if (IS_ERR(cma_obj)) {
                 DRM_ERROR("Failed to allocate from CMA:\n");
                 vc4_bo_stats_dump(vc4);
                 return ERR_PTR(-ENOMEM);
         }
-        }
+
         bo = to_vc4_bo(&cma_obj->base);
 
+        /* By default, BOs do not support the MADV ioctl. This will be enabled
+         * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
+         * BOs).
+         */
+        bo->madv = __VC4_MADV_NOTSUPP;
+
         mutex_lock(&vc4->bo_lock);
         vc4_bo_set_label(&cma_obj->base, type);
         mutex_unlock(&vc4->bo_lock);

@@ -365,6 +515,8 @@ int vc4_dumb_create(struct drm_file *file_priv,
         if (IS_ERR(bo))
                 return PTR_ERR(bo);
 
+        bo->madv = VC4_MADV_WILLNEED;
+
         ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
         drm_gem_object_put_unlocked(&bo->base.base);
 

@@ -403,6 +555,12 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
         struct vc4_bo *bo = to_vc4_bo(gem_bo);
         struct list_head *cache_list;
 
+        /* Remove the BO from the purgeable list. */
+        mutex_lock(&bo->madv_lock);
+        if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
+                vc4_bo_remove_from_purgeable_pool(bo);
+        mutex_unlock(&bo->madv_lock);
+
         mutex_lock(&vc4->bo_lock);
         /* If the object references someone else's memory, we can't cache it.
          */

@@ -418,7 +576,8 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
         }
 
         /* If this object was partially constructed but CMA allocation
-         * had failed, just free it.
+         * had failed, just free it. Can also happen when the BO has been
+         * purged.
          */
         if (!bo->base.vaddr) {
                 vc4_bo_destroy(bo);

@@ -437,6 +596,10 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
                 bo->validated_shader = NULL;
         }
 
+        /* Reset madv and usecnt before adding the BO to the cache. */
+        bo->madv = __VC4_MADV_NOTSUPP;
+        refcount_set(&bo->usecnt, 0);
+
         bo->t_format = false;
         bo->free_time = jiffies;
         list_add(&bo->size_head, cache_list);

@@ -461,6 +624,56 @@ static void vc4_bo_cache_time_work(struct work_struct *work)
         mutex_unlock(&vc4->bo_lock);
 }
 
+int vc4_bo_inc_usecnt(struct vc4_bo *bo)
+{
+        int ret;
+
+        /* Fast path: if the BO is already retained by someone, no need to
+         * check the madv status.
+         */
+        if (refcount_inc_not_zero(&bo->usecnt))
+                return 0;
+
+        mutex_lock(&bo->madv_lock);
+        switch (bo->madv) {
+        case VC4_MADV_WILLNEED:
+                refcount_inc(&bo->usecnt);
+                ret = 0;
+                break;
+        case VC4_MADV_DONTNEED:
+                /* We shouldn't use a BO marked as purgeable if at least
+                 * someone else retained its content by incrementing usecnt.
+                 * Luckily the BO hasn't been purged yet, but something wrong
+                 * is happening here. Just throw an error instead of
+                 * authorizing this use case.
+                 */
+        case __VC4_MADV_PURGED:
+                /* We can't use a purged BO. */
+        default:
+                /* Invalid madv value. */
+                ret = -EINVAL;
+                break;
+        }
+        mutex_unlock(&bo->madv_lock);
+
+        return ret;
+}
+
+void vc4_bo_dec_usecnt(struct vc4_bo *bo)
+{
+        /* Fast path: if the BO is still retained by someone, no need to test
+         * the madv value.
+         */
+        if (refcount_dec_not_one(&bo->usecnt))
+                return;
+
+        mutex_lock(&bo->madv_lock);
+        if (refcount_dec_and_test(&bo->usecnt) &&
+            bo->madv == VC4_MADV_DONTNEED)
+                vc4_bo_add_to_purgeable_pool(bo);
+        mutex_unlock(&bo->madv_lock);
+}
+
 static void vc4_bo_cache_time_timer(unsigned long data)
 {
         struct drm_device *dev = (struct drm_device *)data;

@@ -480,18 +693,52 @@ struct dma_buf *
 vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
 {
         struct vc4_bo *bo = to_vc4_bo(obj);
+        struct dma_buf *dmabuf;
+        int ret;
 
         if (bo->validated_shader) {
                 DRM_DEBUG("Attempting to export shader BO\n");
                 return ERR_PTR(-EINVAL);
         }
 
-        return drm_gem_prime_export(dev, obj, flags);
+        /* Note: as soon as the BO is exported it becomes unpurgeable, because
+         * no one ever decrements the usecnt even if the reference held by the
+         * exported BO is released. This shouldn't be a problem since we don't
+         * expect exported BOs to be marked as purgeable.
+         */
+        ret = vc4_bo_inc_usecnt(bo);
+        if (ret) {
+                DRM_ERROR("Failed to increment BO usecnt\n");
+                return ERR_PTR(ret);
+        }
+
+        dmabuf = drm_gem_prime_export(dev, obj, flags);
+        if (IS_ERR(dmabuf))
+                vc4_bo_dec_usecnt(bo);
+
+        return dmabuf;
+}
+
+int vc4_fault(struct vm_fault *vmf)
+{
+        struct vm_area_struct *vma = vmf->vma;
+        struct drm_gem_object *obj = vma->vm_private_data;
+        struct vc4_bo *bo = to_vc4_bo(obj);
+
+        /* The only reason we would end up here is when user-space accesses
+         * BO's memory after it's been purged.
+         */
+        mutex_lock(&bo->madv_lock);
+        WARN_ON(bo->madv != __VC4_MADV_PURGED);
+        mutex_unlock(&bo->madv_lock);
+
+        return VM_FAULT_SIGBUS;
 }
 
 int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
 {
         struct drm_gem_object *gem_obj;
+        unsigned long vm_pgoff;
         struct vc4_bo *bo;
         int ret;
 

@@ -507,16 +754,36 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
                 return -EINVAL;
         }
 
+        if (bo->madv != VC4_MADV_WILLNEED) {
+                DRM_DEBUG("mmaping of %s BO not allowed\n",
+                          bo->madv == VC4_MADV_DONTNEED ?
+                          "purgeable" : "purged");
+                return -EINVAL;
+        }
+
         /*
          * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
          * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
          * the whole buffer.
          */
         vma->vm_flags &= ~VM_PFNMAP;
-        vma->vm_pgoff = 0;
 
+        /* This ->vm_pgoff dance is needed to make all parties happy:
+         * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated
+         *   mem-region, hence the need to set it to zero (the value set by
+         *   the DRM core is a virtual offset encoding the GEM object-id)
+         * - the mmap() core logic needs ->vm_pgoff to be restored to its
+         *   initial value before returning from this function because it
+         *   encodes the offset of this GEM in the dev->anon_inode pseudo-file
+         *   and this information will be used when we invalidate userspace
+         *   mappings with drm_vma_node_unmap() (called from vc4_gem_purge()).
+         */
+        vm_pgoff = vma->vm_pgoff;
+        vma->vm_pgoff = 0;
         ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
                           bo->base.paddr, vma->vm_end - vma->vm_start);
+        vma->vm_pgoff = vm_pgoff;
+
         if (ret)
                 drm_gem_vm_close(vma);
 

@@ -580,6 +847,8 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
         if (IS_ERR(bo))
                 return PTR_ERR(bo);
 
+        bo->madv = VC4_MADV_WILLNEED;
+
         ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
         drm_gem_object_put_unlocked(&bo->base.base);
 

@@ -633,6 +902,8 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
         if (IS_ERR(bo))
                 return PTR_ERR(bo);
 
+        bo->madv = VC4_MADV_WILLNEED;
+
         if (copy_from_user(bo->base.vaddr,
                            (void __user *)(uintptr_t)args->data,
                            args->size)) {
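The usecnt refcount added above is meant to bracket any hardware or display use of a BO, so a buffer marked DONTNEED cannot be purged while a job still references it. A short sketch of the expected pairing (example_submit_to_hw() is a hypothetical stand-in for whatever actually consumes the BO, not a function from this series):

    static int example_use_bo(struct vc4_bo *bo)
    {
            int ret;

            /* Fails with -EINVAL if the BO was already purged. */
            ret = vc4_bo_inc_usecnt(bo);
            if (ret)
                    return ret;

            ret = example_submit_to_hw(bo);

            /* May move the BO back to the purgeable pool if it is DONTNEED. */
            vc4_bo_dec_usecnt(bo);
            return ret;
    }
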
@@ -100,6 +100,7 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
         case DRM_VC4_PARAM_SUPPORTS_ETC1:
         case DRM_VC4_PARAM_SUPPORTS_THREADED_FS:
         case DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER:
+        case DRM_VC4_PARAM_SUPPORTS_MADVISE:
                 args->value = true;
                 break;
         default:

@@ -117,6 +118,12 @@ static void vc4_lastclose(struct drm_device *dev)
         drm_fbdev_cma_restore_mode(vc4->fbdev);
 }
 
+static const struct vm_operations_struct vc4_vm_ops = {
+        .fault = vc4_fault,
+        .open = drm_gem_vm_open,
+        .close = drm_gem_vm_close,
+};
+
 static const struct file_operations vc4_drm_fops = {
         .owner = THIS_MODULE,
         .open = drm_open,

@@ -142,6 +149,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
         DRM_IOCTL_DEF_DRV(VC4_SET_TILING, vc4_set_tiling_ioctl, DRM_RENDER_ALLOW),
         DRM_IOCTL_DEF_DRV(VC4_GET_TILING, vc4_get_tiling_ioctl, DRM_RENDER_ALLOW),
         DRM_IOCTL_DEF_DRV(VC4_LABEL_BO, vc4_label_bo_ioctl, DRM_RENDER_ALLOW),
+        DRM_IOCTL_DEF_DRV(VC4_GEM_MADVISE, vc4_gem_madvise_ioctl, DRM_RENDER_ALLOW),
 };
 
 static struct drm_driver vc4_drm_driver = {

@@ -166,7 +174,7 @@ static struct drm_driver vc4_drm_driver = {
 
         .gem_create_object = vc4_create_object,
         .gem_free_object_unlocked = vc4_free_object,
-        .gem_vm_ops = &drm_gem_cma_vm_ops,
+        .gem_vm_ops = &vc4_vm_ops,
 
         .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
         .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
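From userspace the feature is reached through the DRM_VC4_PARAM_SUPPORTS_MADVISE query and the new madvise ioctl registered above. A hedged sketch of the intended flow, assuming the drm_vc4_gem_madvise layout from the vc4 UAPI header (handle, madv, retained) and libdrm's drmIoctl wrapper; a real client would probe the SUPPORTS_MADVISE param first:

    #include <errno.h>
    #include <stdint.h>
    #include <xf86drm.h>
    #include <drm/vc4_drm.h>

    /* Mark a BO purgeable while idle, or reclaim it before reuse. Returns a
     * negative errno on failure; otherwise the kernel's "retained" answer. */
    static int example_vc4_madvise(int fd, uint32_t handle, int willneed)
    {
            struct drm_vc4_gem_madvise arg = {
                    .handle = handle,
                    .madv = willneed ? VC4_MADV_WILLNEED : VC4_MADV_DONTNEED,
            };

            if (drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg))
                    return -errno;

            /* When flipping back to WILLNEED, retained == 0 means the BO was
             * purged and its contents must be re-uploaded. */
            return arg.retained;
    }
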
@@ -74,6 +74,19 @@
         /* Protects bo_cache and bo_labels. */
         struct mutex bo_lock;
 
+        /* Purgeable BO pool. All BOs in this pool can have their memory
+         * reclaimed if the driver is unable to allocate new BOs. We also
+         * keep stats related to the purge mechanism here.
+         */
+        struct {
+                struct list_head list;
+                unsigned int num;
+                size_t size;
+                unsigned int purged_num;
+                size_t purged_size;
+                struct mutex lock;
+        } purgeable;
+
         uint64_t dma_fence_context;
 
         /* Sequence number for the last job queued in bin_job_list.

@@ -192,6 +205,16 @@
          * for user-allocated labels.
          */
         int label;
+
+        /* Count the number of active users. This is needed to determine
+         * whether we can move the BO to the purgeable list or not (when the BO
+         * is used by the GPU or the display engine we can't purge it).
+         */
+        refcount_t usecnt;
+
+        /* Store purgeable/purged state here */
+        u32 madv;
+        struct mutex madv_lock;
 };
 
 static inline struct vc4_bo *

@@ -503,6 +526,7 @@ int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
 int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
+int vc4_fault(struct vm_fault *vmf);
 int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
 struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
 int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);

@@ -513,6 +537,10 @@ void *vc4_prime_vmap(struct drm_gem_object *obj);
 int vc4_bo_cache_init(struct drm_device *dev);
 void vc4_bo_cache_destroy(struct drm_device *dev);
 int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
+int vc4_bo_inc_usecnt(struct vc4_bo *bo);
+void vc4_bo_dec_usecnt(struct vc4_bo *bo);
+void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
+void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
 
 /* vc4_crtc.c */
 extern struct platform_driver vc4_crtc_driver;

@@ -557,6 +585,8 @@ void vc4_job_handle_completed(struct vc4_dev *vc4);
 int vc4_queue_seqno_cb(struct drm_device *dev,
                        struct vc4_seqno_cb *cb, uint64_t seqno,
                        void (*func)(struct vc4_seqno_cb *cb));
+int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv);
 
 /* vc4_hdmi.c */
 extern struct platform_driver vc4_hdmi_driver;
@@ -1360,6 +1360,27 @@ static void dsi_handle_error(struct vc4_dsi *dsi,
         *ret = IRQ_HANDLED;
 }
 
+/*
+ * Initial handler for port 1 where we need the reg_dma workaround.
+ * The register DMA writes sleep, so we can't do it in the top half.
+ * Instead we use IRQF_ONESHOT so that the IRQ gets disabled in the
+ * parent interrupt controller until our interrupt thread is done.
+ */
+static irqreturn_t vc4_dsi_irq_defer_to_thread_handler(int irq, void *data)
+{
+        struct vc4_dsi *dsi = data;
+        u32 stat = DSI_PORT_READ(INT_STAT);
+
+        if (!stat)
+                return IRQ_NONE;
+
+        return IRQ_WAKE_THREAD;
+}
+
+/*
+ * Normal IRQ handler for port 0, or the threaded IRQ handler for port
+ * 1 where we need the reg_dma workaround.
+ */
 static irqreturn_t vc4_dsi_irq_handler(int irq, void *data)
 {
         struct vc4_dsi *dsi = data;

@@ -1539,6 +1560,13 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
         /* Clear any existing interrupt state. */
         DSI_PORT_WRITE(INT_STAT, DSI_PORT_READ(INT_STAT));
 
+        if (dsi->reg_dma_mem)
+                ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0),
+                                                vc4_dsi_irq_defer_to_thread_handler,
+                                                vc4_dsi_irq_handler,
+                                                IRQF_ONESHOT,
+                                                "vc4 dsi", dsi);
+        else
                 ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
                                        vc4_dsi_irq_handler, 0, "vc4 dsi", dsi);
         if (ret) {
|
|||||||
continue;
|
continue;
|
||||||
|
|
||||||
for (j = 0; j < exec[i]->bo_count; j++) {
|
for (j = 0; j < exec[i]->bo_count; j++) {
|
||||||
|
bo = to_vc4_bo(&exec[i]->bo[j]->base);
|
||||||
|
|
||||||
|
/* Retain BOs just in case they were marked purgeable.
|
||||||
|
* This prevents the BO from being purged before
|
||||||
|
* someone had a chance to dump the hang state.
|
||||||
|
*/
|
||||||
|
WARN_ON(!refcount_read(&bo->usecnt));
|
||||||
|
refcount_inc(&bo->usecnt);
|
||||||
drm_gem_object_get(&exec[i]->bo[j]->base);
|
drm_gem_object_get(&exec[i]->bo[j]->base);
|
||||||
kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
|
kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
|
||||||
}
|
}
|
||||||
|
|
||||||
list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
|
list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
|
||||||
|
/* No need to retain BOs coming from the ->unref_list
|
||||||
|
* because they are naturally unpurgeable.
|
||||||
|
*/
|
||||||
drm_gem_object_get(&bo->base.base);
|
drm_gem_object_get(&bo->base.base);
|
||||||
kernel_state->bo[j + prev_idx] = &bo->base.base;
|
kernel_state->bo[j + prev_idx] = &bo->base.base;
|
||||||
j++;
|
j++;
|
||||||
@@ -233,6 +244,26 @@ vc4_save_hang_state(struct drm_device *dev)
|
|||||||
state->fdbgs = V3D_READ(V3D_FDBGS);
|
state->fdbgs = V3D_READ(V3D_FDBGS);
|
||||||
state->errstat = V3D_READ(V3D_ERRSTAT);
|
state->errstat = V3D_READ(V3D_ERRSTAT);
|
||||||
|
|
||||||
|
/* We need to turn purgeable BOs into unpurgeable ones so that
|
||||||
|
* userspace has a chance to dump the hang state before the kernel
|
||||||
|
* decides to purge those BOs.
|
||||||
|
* Note that BO consistency at dump time cannot be guaranteed. For
|
||||||
|
* example, if the owner of these BOs decides to re-use them or mark
|
||||||
|
* them purgeable again there's nothing we can do to prevent it.
|
||||||
|
*/
|
||||||
|
for (i = 0; i < kernel_state->user_state.bo_count; i++) {
|
||||||
|
struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);
|
||||||
|
|
||||||
|
if (bo->madv == __VC4_MADV_NOTSUPP)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
mutex_lock(&bo->madv_lock);
|
||||||
|
if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
|
||||||
|
bo->madv = VC4_MADV_WILLNEED;
|
||||||
|
refcount_dec(&bo->usecnt);
|
||||||
|
mutex_unlock(&bo->madv_lock);
|
||||||
|
}
|
||||||
|
|
||||||
spin_lock_irqsave(&vc4->job_lock, irqflags);
|
spin_lock_irqsave(&vc4->job_lock, irqflags);
|
||||||
if (vc4->hang_state) {
|
if (vc4->hang_state) {
|
||||||
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
|
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
|
||||||
@@ -639,9 +670,6 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
|
|||||||
* The command validator needs to reference BOs by their index within
|
* The command validator needs to reference BOs by their index within
|
||||||
* the submitted job's BO list. This does the validation of the job's
|
* the submitted job's BO list. This does the validation of the job's
|
||||||
* BO list and reference counting for the lifetime of the job.
|
* BO list and reference counting for the lifetime of the job.
|
||||||
*
|
|
||||||
* Note that this function doesn't need to unreference the BOs on
|
|
||||||
* failure, because that will happen at vc4_complete_exec() time.
|
|
||||||
*/
|
*/
|
||||||
static int
|
static int
|
||||||
vc4_cl_lookup_bos(struct drm_device *dev,
|
vc4_cl_lookup_bos(struct drm_device *dev,
|
||||||
@@ -693,16 +721,47 @@ vc4_cl_lookup_bos(struct drm_device *dev,
|
|||||||
DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
|
DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
|
||||||
i, handles[i]);
|
i, handles[i]);
|
||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
spin_unlock(&file_priv->table_lock);
|
break;
|
||||||
goto fail;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
drm_gem_object_get(bo);
|
drm_gem_object_get(bo);
|
||||||
exec->bo[i] = (struct drm_gem_cma_object *)bo;
|
exec->bo[i] = (struct drm_gem_cma_object *)bo;
|
||||||
}
|
}
|
||||||
spin_unlock(&file_priv->table_lock);
|
spin_unlock(&file_priv->table_lock);
|
||||||
|
|
||||||
|
if (ret)
|
||||||
|
goto fail_put_bo;
|
||||||
|
|
||||||
|
for (i = 0; i < exec->bo_count; i++) {
|
||||||
|
ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
|
||||||
|
if (ret)
|
||||||
|
goto fail_dec_usecnt;
|
||||||
|
}
|
||||||
|
|
||||||
|
kvfree(handles);
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
fail_dec_usecnt:
|
||||||
|
/* Decrease usecnt on acquired objects.
|
||||||
|
* We cannot rely on vc4_complete_exec() to release resources here,
|
||||||
|
* because vc4_complete_exec() has no information about which BO has
|
||||||
|
* had its ->usecnt incremented.
|
||||||
|
* To make things easier we just free everything explicitly and set
|
||||||
|
* exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
|
||||||
|
* step.
|
||||||
|
*/
|
||||||
|
for (i-- ; i >= 0; i--)
|
||||||
|
vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));
|
||||||
|
|
||||||
|
fail_put_bo:
|
||||||
|
/* Release any reference to acquired objects. */
|
||||||
|
for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
|
||||||
|
drm_gem_object_put_unlocked(&exec->bo[i]->base);
|
||||||
|
|
||||||
fail:
|
fail:
|
||||||
kvfree(handles);
|
kvfree(handles);
|
||||||
|
kvfree(exec->bo);
|
||||||
|
exec->bo = NULL;
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -833,8 +892,12 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
|
|||||||
dma_fence_signal(exec->fence);
|
dma_fence_signal(exec->fence);
|
||||||
|
|
||||||
if (exec->bo) {
|
if (exec->bo) {
|
||||||
for (i = 0; i < exec->bo_count; i++)
|
for (i = 0; i < exec->bo_count; i++) {
|
||||||
|
struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);
|
||||||
|
|
||||||
|
vc4_bo_dec_usecnt(bo);
|
||||||
drm_gem_object_put_unlocked(&exec->bo[i]->base);
|
drm_gem_object_put_unlocked(&exec->bo[i]->base);
|
||||||
|
}
|
||||||
kvfree(exec->bo);
|
kvfree(exec->bo);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1098,6 +1161,9 @@ vc4_gem_init(struct drm_device *dev)
|
|||||||
INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
|
INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
|
||||||
|
|
||||||
mutex_init(&vc4->power_lock);
|
mutex_init(&vc4->power_lock);
|
||||||
|
|
||||||
|
INIT_LIST_HEAD(&vc4->purgeable.list);
|
||||||
|
mutex_init(&vc4->purgeable.lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
@@ -1121,3 +1187,81 @@ vc4_gem_destroy(struct drm_device *dev)
|
|||||||
if (vc4->hang_state)
|
if (vc4->hang_state)
|
||||||
vc4_free_hang_state(dev, vc4->hang_state);
|
vc4_free_hang_state(dev, vc4->hang_state);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
|
||||||
|
struct drm_file *file_priv)
|
||||||
|
{
|
||||||
|
struct drm_vc4_gem_madvise *args = data;
|
||||||
|
struct drm_gem_object *gem_obj;
|
||||||
|
struct vc4_bo *bo;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
switch (args->madv) {
|
||||||
|
case VC4_MADV_DONTNEED:
|
||||||
|
case VC4_MADV_WILLNEED:
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (args->pad != 0)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
|
||||||
|
if (!gem_obj) {
|
||||||
|
DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
|
||||||
|
return -ENOENT;
|
||||||
|
}
|
||||||
|
|
||||||
|
bo = to_vc4_bo(gem_obj);
|
||||||
|
|
||||||
|
/* Only BOs exposed to userspace can be purged. */
|
||||||
|
if (bo->madv == __VC4_MADV_NOTSUPP) {
|
||||||
|
DRM_DEBUG("madvise not supported on this BO\n");
|
||||||
|
ret = -EINVAL;
|
||||||
|
goto out_put_gem;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Not sure it's safe to purge imported BOs. Let's just assume it's
|
||||||
|
* not until proven otherwise.
|
||||||
|
*/
|
||||||
|
if (gem_obj->import_attach) {
|
||||||
|
DRM_DEBUG("madvise not supported on imported BOs\n");
|
||||||
|
ret = -EINVAL;
|
||||||
|
goto out_put_gem;
|
||||||
|
}
|
||||||
|
|
||||||
|
mutex_lock(&bo->madv_lock);
|
||||||
|
|
||||||
|
if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
|
||||||
|
!refcount_read(&bo->usecnt)) {
|
||||||
|
/* If the BO is about to be marked as purgeable, is not used
|
||||||
|
* and is not already purgeable or purged, add it to the
|
||||||
|
* purgeable list.
|
||||||
|
*/
|
||||||
|
vc4_bo_add_to_purgeable_pool(bo);
|
||||||
|
} else if (args->madv == VC4_MADV_WILLNEED &&
|
||||||
|
bo->madv == VC4_MADV_DONTNEED &&
|
||||||
|
!refcount_read(&bo->usecnt)) {
|
||||||
|
/* The BO has not been purged yet, just remove it from
|
||||||
|
* the purgeable list.
|
||||||
|
*/
|
||||||
|
vc4_bo_remove_from_purgeable_pool(bo);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Save the purged state. */
|
||||||
|
args->retained = bo->madv != __VC4_MADV_PURGED;
|
||||||
|
|
||||||
|
/* Update internal madv state only if the bo was not purged. */
|
||||||
|
if (bo->madv != __VC4_MADV_PURGED)
|
||||||
|
bo->madv = args->madv;
|
||||||
|
|
||||||
|
mutex_unlock(&bo->madv_lock);
|
||||||
|
|
||||||
|
ret = 0;
|
||||||
|
|
||||||
|
out_put_gem:
|
||||||
|
drm_gem_object_put_unlocked(gem_obj);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|||||||
@@ -23,6 +23,7 @@
|
|||||||
#include <drm/drm_fb_cma_helper.h>
|
#include <drm/drm_fb_cma_helper.h>
|
||||||
#include <drm/drm_plane_helper.h>
|
#include <drm/drm_plane_helper.h>
|
||||||
|
|
||||||
|
#include "uapi/drm/vc4_drm.h"
|
||||||
#include "vc4_drv.h"
|
#include "vc4_drv.h"
|
||||||
#include "vc4_regs.h"
|
#include "vc4_regs.h"
|
||||||
|
|
||||||
@@ -774,21 +775,40 @@ static int vc4_prepare_fb(struct drm_plane *plane,
|
|||||||
{
|
{
|
||||||
struct vc4_bo *bo;
|
struct vc4_bo *bo;
|
||||||
struct dma_fence *fence;
|
struct dma_fence *fence;
|
||||||
|
int ret;
|
||||||
|
|
||||||
if ((plane->state->fb == state->fb) || !state->fb)
|
if ((plane->state->fb == state->fb) || !state->fb)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
|
bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
|
||||||
|
|
||||||
|
ret = vc4_bo_inc_usecnt(bo);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
|
||||||
fence = reservation_object_get_excl_rcu(bo->resv);
|
fence = reservation_object_get_excl_rcu(bo->resv);
|
||||||
drm_atomic_set_fence_for_plane(state, fence);
|
drm_atomic_set_fence_for_plane(state, fence);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void vc4_cleanup_fb(struct drm_plane *plane,
|
||||||
|
struct drm_plane_state *state)
|
||||||
|
{
|
||||||
|
struct vc4_bo *bo;
|
||||||
|
|
||||||
|
if (plane->state->fb == state->fb || !state->fb)
|
||||||
|
return;
|
||||||
|
|
||||||
|
bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
|
||||||
|
vc4_bo_dec_usecnt(bo);
|
||||||
|
}
|
||||||
|
|
||||||
static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
|
static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
|
||||||
.atomic_check = vc4_plane_atomic_check,
|
.atomic_check = vc4_plane_atomic_check,
|
||||||
.atomic_update = vc4_plane_atomic_update,
|
.atomic_update = vc4_plane_atomic_update,
|
||||||
.prepare_fb = vc4_prepare_fb,
|
.prepare_fb = vc4_prepare_fb,
|
||||||
|
.cleanup_fb = vc4_cleanup_fb,
|
||||||
};
|
};
|
||||||
|
|
||||||
static void vc4_plane_destroy(struct drm_plane *plane)
|
static void vc4_plane_destroy(struct drm_plane *plane)
|
||||||
|
|||||||
@@ -1402,29 +1402,14 @@ static struct miscdevice vga_arb_device = {
|
|||||||
MISC_DYNAMIC_MINOR, "vga_arbiter", &vga_arb_device_fops
|
MISC_DYNAMIC_MINOR, "vga_arbiter", &vga_arb_device_fops
|
||||||
};
|
};
|
||||||
|
|
||||||
static int __init vga_arb_device_init(void)
|
static void __init vga_arb_select_default_device(void)
|
||||||
{
|
{
|
||||||
int rc;
|
|
||||||
struct pci_dev *pdev;
|
struct pci_dev *pdev;
|
||||||
struct vga_device *vgadev;
|
struct vga_device *vgadev;
|
||||||
|
|
||||||
rc = misc_register(&vga_arb_device);
|
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
|
||||||
if (rc < 0)
|
|
||||||
pr_err("error %d registering device\n", rc);
|
|
||||||
|
|
||||||
bus_register_notifier(&pci_bus_type, &pci_notifier);
|
|
||||||
|
|
||||||
/* We add all pci devices satisfying vga class in the arbiter by
|
|
||||||
* default */
|
|
||||||
pdev = NULL;
|
|
||||||
while ((pdev =
|
|
||||||
pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
|
|
||||||
PCI_ANY_ID, pdev)) != NULL)
|
|
||||||
vga_arbiter_add_pci_device(pdev);
|
|
||||||
|
|
||||||
list_for_each_entry(vgadev, &vga_list, list) {
|
list_for_each_entry(vgadev, &vga_list, list) {
|
||||||
struct device *dev = &vgadev->pdev->dev;
|
struct device *dev = &vgadev->pdev->dev;
|
||||||
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
|
|
||||||
/*
|
/*
|
||||||
* Override vga_arbiter_add_pci_device()'s I/O based detection
|
* Override vga_arbiter_add_pci_device()'s I/O based detection
|
||||||
* as it may take the wrong device (e.g. on Apple system under
|
* as it may take the wrong device (e.g. on Apple system under
|
||||||
@@ -1461,13 +1446,66 @@ static int __init vga_arb_device_init(void)
|
|||||||
vgaarb_info(dev, "overriding boot device\n");
|
vgaarb_info(dev, "overriding boot device\n");
|
||||||
vga_set_default_device(vgadev->pdev);
|
vga_set_default_device(vgadev->pdev);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
if (!vga_default_device()) {
|
||||||
|
list_for_each_entry(vgadev, &vga_list, list) {
|
||||||
|
struct device *dev = &vgadev->pdev->dev;
|
||||||
|
u16 cmd;
|
||||||
|
|
||||||
|
pdev = vgadev->pdev;
|
||||||
|
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
|
||||||
|
if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
|
||||||
|
vgaarb_info(dev, "setting as boot device (VGA legacy resources not available)\n");
|
||||||
|
vga_set_default_device(pdev);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!vga_default_device()) {
|
||||||
|
vgadev = list_first_entry_or_null(&vga_list,
|
||||||
|
struct vga_device, list);
|
||||||
|
if (vgadev) {
|
||||||
|
struct device *dev = &vgadev->pdev->dev;
|
||||||
|
vgaarb_info(dev, "setting as boot device (VGA legacy resources not available)\n");
|
||||||
|
vga_set_default_device(vgadev->pdev);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static int __init vga_arb_device_init(void)
|
||||||
|
{
|
||||||
|
int rc;
|
||||||
|
struct pci_dev *pdev;
|
||||||
|
struct vga_device *vgadev;
|
||||||
|
|
||||||
|
rc = misc_register(&vga_arb_device);
|
||||||
|
if (rc < 0)
|
||||||
|
pr_err("error %d registering device\n", rc);
|
||||||
|
|
||||||
|
bus_register_notifier(&pci_bus_type, &pci_notifier);
|
||||||
|
|
||||||
|
/* We add all PCI devices satisfying VGA class in the arbiter by
|
||||||
|
* default */
|
||||||
|
pdev = NULL;
|
||||||
|
while ((pdev =
|
||||||
|
pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
|
||||||
|
PCI_ANY_ID, pdev)) != NULL)
|
||||||
|
vga_arbiter_add_pci_device(pdev);
|
||||||
|
|
||||||
|
list_for_each_entry(vgadev, &vga_list, list) {
|
||||||
|
struct device *dev = &vgadev->pdev->dev;
|
||||||
|
|
||||||
if (vgadev->bridge_has_one_vga)
|
if (vgadev->bridge_has_one_vga)
|
||||||
vgaarb_info(dev, "bridge control possible\n");
|
vgaarb_info(dev, "bridge control possible\n");
|
||||||
else
|
else
|
||||||
vgaarb_info(dev, "no bridge control possible\n");
|
vgaarb_info(dev, "no bridge control possible\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
vga_arb_select_default_device();
|
||||||
|
|
||||||
pr_info("loaded\n");
|
pr_info("loaded\n");
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -128,7 +128,7 @@ struct dma_fence_cb {
|
|||||||
* implementation know that there is another driver waiting on
|
* implementation know that there is another driver waiting on
|
||||||
* the signal (ie. hw->sw case).
|
* the signal (ie. hw->sw case).
|
||||||
*
|
*
|
||||||
* This function can be called called from atomic context, but not
|
* This function can be called from atomic context, but not
|
||||||
* from irq context, so normal spinlocks can be used.
|
* from irq context, so normal spinlocks can be used.
|
||||||
*
|
*
|
||||||
* A return value of false indicates the fence already passed,
|
* A return value of false indicates the fence already passed,
|
||||||
|
|||||||
@@ -41,6 +41,7 @@ extern "C" {
|
|||||||
#define DRM_VC4_SET_TILING 0x08
|
#define DRM_VC4_SET_TILING 0x08
|
||||||
#define DRM_VC4_GET_TILING 0x09
|
#define DRM_VC4_GET_TILING 0x09
|
||||||
#define DRM_VC4_LABEL_BO 0x0a
|
#define DRM_VC4_LABEL_BO 0x0a
|
||||||
|
#define DRM_VC4_GEM_MADVISE 0x0b
|
||||||
|
|
||||||
#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
|
#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
|
||||||
#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
|
#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
|
||||||
@@ -53,6 +54,7 @@ extern "C" {
|
|||||||
#define DRM_IOCTL_VC4_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling)
|
#define DRM_IOCTL_VC4_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling)
|
||||||
#define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
|
#define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
|
||||||
#define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
|
#define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
|
||||||
|
#define DRM_IOCTL_VC4_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise)
|
||||||
|
|
||||||
struct drm_vc4_submit_rcl_surface {
|
struct drm_vc4_submit_rcl_surface {
|
||||||
__u32 hindex; /* Handle index, or ~0 if not present. */
|
__u32 hindex; /* Handle index, or ~0 if not present. */
|
||||||
@@ -305,6 +307,7 @@ struct drm_vc4_get_hang_state {
|
|||||||
#define DRM_VC4_PARAM_SUPPORTS_ETC1 4
|
#define DRM_VC4_PARAM_SUPPORTS_ETC1 4
|
||||||
#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
|
#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
|
||||||
#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6
|
#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6
|
||||||
|
#define DRM_VC4_PARAM_SUPPORTS_MADVISE 7
|
||||||
|
|
||||||
struct drm_vc4_get_param {
|
struct drm_vc4_get_param {
|
||||||
__u32 param;
|
__u32 param;
|
||||||
@@ -333,6 +336,22 @@ struct drm_vc4_label_bo {
|
|||||||
__u64 name;
|
__u64 name;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* States prefixed with '__' are internal states and cannot be passed to the
|
||||||
|
* DRM_IOCTL_VC4_GEM_MADVISE ioctl.
|
||||||
|
*/
|
||||||
|
#define VC4_MADV_WILLNEED 0
|
||||||
|
#define VC4_MADV_DONTNEED 1
|
||||||
|
#define __VC4_MADV_PURGED 2
|
||||||
|
#define __VC4_MADV_NOTSUPP 3
|
||||||
|
|
||||||
|
struct drm_vc4_gem_madvise {
|
||||||
|
__u32 handle;
|
||||||
|
__u32 madv;
|
||||||
|
__u32 retained;
|
||||||
|
__u32 pad;
|
||||||
|
};
|
||||||
|
|
||||||
#if defined(__cplusplus)
|
#if defined(__cplusplus)
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|||||||
Reference in New Issue
Block a user