Merge tag 'drm-misc-next-2023-10-27' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for v6.7-rc1:

drm-misc-next-2023-10-19 + following:

UAPI Changes:

Cross-subsystem Changes:
- Convert fbdev drivers to use fbdev i/o mem helpers.

Core Changes:
- Use cross-references for macros in docs.
- Make drm_client_buffer_addfb use addfb2.
- Add NV20 and NV30 YUV formats.
- Documentation updates for create_dumb ioctl.
- CI fixes.
- Allow variable number of run-queues in scheduler.

Driver Changes:
- Rename drm/ast constants.
- Make ili9882t its own driver.
- Assorted fixes in ivpu, vc4, bridge/synopsys, amdgpu.
- Add planar formats to rockchip.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/3d92fae8-9b1b-4165-9ca8-5fda11ee146b@linux.intel.com
Dave Airlie 2023-10-31 10:47:49 +10:00
commit 915b6d034b
192 changed files with 4686 additions and 1677 deletions

View File

@ -0,0 +1,115 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/bridge/fsl,imx93-mipi-dsi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale i.MX93 specific extensions to Synopsys Designware MIPI DSI
maintainers:
- Liu Ying <victor.liu@nxp.com>
description: |
There is a Synopsys Designware MIPI DSI Host Controller and a Synopsys
Designware MIPI DPHY embedded in Freescale i.MX93 SoC. Some configurations
and extensions to them are controlled by i.MX93 media blk-ctrl.
allOf:
- $ref: snps,dw-mipi-dsi.yaml#
properties:
compatible:
const: fsl,imx93-mipi-dsi
clocks:
items:
- description: apb clock
- description: pixel clock
- description: PHY configuration clock
- description: PHY reference clock
clock-names:
items:
- const: pclk
- const: pix
- const: phy_cfg
- const: phy_ref
interrupts:
maxItems: 1
fsl,media-blk-ctrl:
$ref: /schemas/types.yaml#/definitions/phandle
description:
i.MX93 media blk-ctrl, as a syscon, controls pixel component bit map
configurations from LCDIF display controller to the MIPI DSI host
controller and MIPI DPHY PLL related configurations through PLL SoC
interface.
power-domains:
maxItems: 1
required:
- compatible
- interrupts
- fsl,media-blk-ctrl
- power-domains
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/imx93-clock.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/fsl,imx93-power.h>
dsi@4ae10000 {
compatible = "fsl,imx93-mipi-dsi";
reg = <0x4ae10000 0x10000>;
interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_MIPI_DSI_GATE>,
<&clk IMX93_CLK_MEDIA_DISP_PIX>,
<&clk IMX93_CLK_MIPI_PHY_CFG>,
<&clk IMX93_CLK_24M>;
clock-names = "pclk", "pix", "phy_cfg", "phy_ref";
fsl,media-blk-ctrl = <&media_blk_ctrl>;
power-domains = <&media_blk_ctrl IMX93_MEDIABLK_PD_MIPI_DSI>;
#address-cells = <1>;
#size-cells = <0>;
panel@0 {
compatible = "raydium,rm67191";
reg = <0>;
reset-gpios = <&adp5585gpio 6 GPIO_ACTIVE_LOW>;
dsi-lanes = <4>;
video-mode = <2>;
port {
panel_in: endpoint {
remote-endpoint = <&dsi_out>;
};
};
};
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
dsi_to_lcdif: endpoint {
remote-endpoint = <&lcdif_to_dsi>;
};
};
port@1 {
reg = <1>;
dsi_out: endpoint {
remote-endpoint = <&panel_in>;
};
};
};
};

View File

@ -22,6 +22,8 @@ properties:
enum:
# Anbernic RG353V-V2 5.0" 640x480 TFT LCD panel
- anbernic,rg353v-panel-v2
# Powkiddy RGB30 3.0" 720x720 TFT LCD panel
- powkiddy,rgb30-panel
# Rocktech JH057N00900 5.5" 720x1440 TFT LCD panel
- rocktech,jh057n00900
# Xingbangda XBD599 5.99" 720x1440 TFT LCD panel

View File

@ -0,0 +1,42 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/solomon,ssd-common.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Common properties for Solomon OLED Display Controllers
maintainers:
- Javier Martinez Canillas <javierm@redhat.com>
properties:
reg:
maxItems: 1
reset-gpios:
maxItems: 1
# Only required for SPI
dc-gpios:
description:
GPIO connected to the controller's D/C# (Data/Command) pin,
that is needed for 4-wire SPI to tell the controller if the
data sent is for a command register or the display data RAM
maxItems: 1
solomon,height:
$ref: /schemas/types.yaml#/definitions/uint32
description:
Height in pixel of the screen driven by the controller.
The default value is controller-dependent.
solomon,width:
$ref: /schemas/types.yaml#/definitions/uint32
description:
Width in pixel of the screen driven by the controller.
The default value is controller-dependent.
allOf:
- $ref: /schemas/spi/spi-peripheral-props.yaml#
additionalProperties: true

View File

@ -27,38 +27,12 @@ properties:
- solomon,ssd1307
- solomon,ssd1309
reg:
maxItems: 1
pwms:
maxItems: 1
reset-gpios:
maxItems: 1
# Only required for SPI
dc-gpios:
description:
GPIO connected to the controller's D/C# (Data/Command) pin,
that is needed for 4-wire SPI to tell the controller if the
data sent is for a command register or the display data RAM
maxItems: 1
vbat-supply:
description: The supply for VBAT
solomon,height:
$ref: /schemas/types.yaml#/definitions/uint32
description:
Height in pixel of the screen driven by the controller.
The default value is controller-dependent.
solomon,width:
$ref: /schemas/types.yaml#/definitions/uint32
description:
Width in pixel of the screen driven by the controller.
The default value is controller-dependent.
solomon,page-offset:
$ref: /schemas/types.yaml#/definitions/uint32
default: 1
@ -148,7 +122,7 @@ required:
- reg
allOf:
- $ref: /schemas/spi/spi-peripheral-props.yaml#
- $ref: solomon,ssd-common.yaml#
- if:
properties:

View File

@ -0,0 +1,89 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/solomon,ssd132x.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Solomon SSD132x OLED Display Controllers
maintainers:
- Javier Martinez Canillas <javierm@redhat.com>
properties:
compatible:
enum:
- solomon,ssd1322
- solomon,ssd1325
- solomon,ssd1327
required:
- compatible
- reg
allOf:
- $ref: solomon,ssd-common.yaml#
- if:
properties:
compatible:
contains:
const: solomon,ssd1322
then:
properties:
width:
default: 480
height:
default: 128
- if:
properties:
compatible:
contains:
const: solomon,ssd1325
then:
properties:
width:
default: 128
height:
default: 80
- if:
properties:
compatible:
contains:
const: solomon,ssd1327
then:
properties:
width:
default: 128
height:
default: 128
unevaluatedProperties: false
examples:
- |
i2c {
#address-cells = <1>;
#size-cells = <0>;
oled@3c {
compatible = "solomon,ssd1327";
reg = <0x3c>;
reset-gpios = <&gpio2 7>;
};
};
- |
spi {
#address-cells = <1>;
#size-cells = <0>;
oled@0 {
compatible = "solomon,ssd1327";
reg = <0x0>;
reset-gpios = <&gpio2 7>;
dc-gpios = <&gpio2 8>;
spi-max-frequency = <10000000>;
};
};

View File

@ -1081,6 +1081,8 @@ patternProperties:
description: Powertip Tech. Corp.
"^powervr,.*":
description: PowerVR (deprecated, use img)
"^powkiddy,.*":
description: Powkiddy
"^primux,.*":
description: Primux Trading, S.L.
"^probox2,.*":

View File

@ -67,6 +67,19 @@ Lists the tests that for a given driver on a specific hardware revision are
known to behave unreliably. These tests won't cause a job to fail regardless of
the result. They will still be run.
Each new flake entry must be associated with a link to the email reporting the
bug to the author of the affected driver, the board name or Device Tree name of
the board, the first kernel version affected, and an approximation of the
failure rate.
They should be provided under the following format::
# Bug Report: $LORE_OR_PATCHWORK_URL
# Board Name: broken-board.dtb
# Version: 6.6-rc1
# Failure Rate: 100
flaky-test
drivers/gpu/drm/ci/${DRIVER_NAME}-${HW_REVISION}-skips.txt
-----------------------------------------------------------
@ -86,10 +99,13 @@ https://gitlab.freedesktop.org/janedoe/linux/-/settings/ci_cd), change the
CI/CD configuration file from .gitlab-ci.yml to
drivers/gpu/drm/ci/gitlab-ci.yml.
3. Next time you push to this repository, you will see a CI pipeline being
3. Request to be added to the drm/ci-ok group so that your user has the
necessary privileges to run the CI on https://gitlab.freedesktop.org/drm/ci-ok
4. Next time you push to this repository, you will see a CI pipeline being
created (eg. https://gitlab.freedesktop.org/janedoe/linux/-/pipelines)
4. The various jobs will be run and when the pipeline is finished, all jobs
5. The various jobs will be run and when the pipeline is finished, all jobs
should be green unless a regression has been found.

View File

@ -360,6 +360,8 @@ Format Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_fourcc.c
:export:
.. _kms_dumb_buffer_objects:
Dumb Buffer Objects
===================

View File

@ -527,12 +527,12 @@ VBlank event handling
The DRM core exposes two vertical blank related ioctls:
DRM_IOCTL_WAIT_VBLANK
:c:macro:`DRM_IOCTL_WAIT_VBLANK`
This takes a struct drm_wait_vblank structure as its argument, and
it is used to block or request a signal when a specified vblank
event occurs.
DRM_IOCTL_MODESET_CTL
:c:macro:`DRM_IOCTL_MODESET_CTL`
This was only used for user-mode-setting drivers around modesetting
changes to allow the kernel to update the vblank interrupt after
mode setting, since on many devices the vertical blank counter is
@ -555,8 +555,8 @@ The index is used in cases where a densely packed identifier for a CRTC is
needed, for instance a bitmask of CRTC's. The member possible_crtcs of struct
drm_mode_get_plane is an example.
DRM_IOCTL_MODE_GETRESOURCES populates a structure with an array of CRTC ID's,
and the CRTC index is its position in this array.
:c:macro:`DRM_IOCTL_MODE_GETRESOURCES` populates a structure with an array of
CRTC ID's, and the CRTC index is its position in this array.
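To make the index relationship concrete, here is a minimal user-space sketch
(added for illustration, not part of the kernel documentation) that derives a
CRTC's index from its position in the array returned by
:c:macro:`DRM_IOCTL_MODE_GETRESOURCES`, assuming libdrm's
drmModeGetResources() wrapper is available:
.. code-block:: c
    #include <stdint.h>
    #include <xf86drmMode.h>

    /* Return the index of crtc_id within the resources array, or -1 if absent. */
    static int crtc_index(int fd, uint32_t crtc_id)
    {
        drmModeRes *res = drmModeGetResources(fd); /* wraps DRM_IOCTL_MODE_GETRESOURCES */
        int idx = -1;

        if (!res)
            return -1;
        for (int i = 0; i < res->count_crtcs; i++) {
            if (res->crtcs[i] == crtc_id) {
                idx = i;
                break;
            }
        }
        drmModeFreeResources(res);
        return idx; /* bit (1 << idx) may then be tested against possible_crtcs */
    }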
.. kernel-doc:: include/uapi/drm/drm.h
:internal:

View File

@ -0,0 +1,309 @@
.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
====================
Asynchronous VM_BIND
====================
Nomenclature:
=============
* ``VRAM``: On-device memory. Sometimes referred to as device local memory.
* ``gpu_vm``: A virtual GPU address space. Typically per process, but
can be shared by multiple processes.
* ``VM_BIND``: An operation or a list of operations to modify a gpu_vm using
an IOCTL. The operations include mapping and unmapping system- or
VRAM memory.
* ``syncobj``: A container that abstracts synchronization objects. The
synchronization objects can be either generic, like dma-fences or
driver specific. A syncobj typically indicates the type of the
underlying synchronization object.
* ``in-syncobj``: Argument to a VM_BIND IOCTL, the VM_BIND operation waits
for these before starting.
* ``out-syncobj``: Argument to a VM_BIND_IOCTL, the VM_BIND operation
signals these when the bind operation is complete.
* ``dma-fence``: A cross-driver synchronization object. A basic
understanding of dma-fences is required to digest this
document. Please refer to the ``DMA Fences`` section of the
:doc:`dma-buf doc </driver-api/dma-buf>`.
* ``memory fence``: A synchronization object, different from a dma-fence.
A memory fence uses the value of a specified memory location to determine
signaled status. A memory fence can be awaited and signaled by both
the GPU and CPU. Memory fences are sometimes referred to as
user-fences, userspace-fences or gpu futexes and do not necessarily obey
the dma-fence rule of signaling within a "reasonable amount of time".
The kernel should thus avoid waiting for memory fences with locks held.
* ``long-running workload``: A workload that may take more than the
current stipulated dma-fence maximum signal delay to complete and
which therefore needs to set the gpu_vm or the GPU execution context in
a certain mode that disallows completion dma-fences.
* ``exec function``: An exec function is a function that revalidates all
affected gpu_vmas, submits a GPU command batch and registers the
dma_fence representing the GPU command's activity with all affected
dma_resvs. For completeness, although not covered by this document,
it's worth mentioning that an exec function may also be the
revalidation worker that is used by some drivers in compute /
long-running mode.
* ``bind context``: A context identifier used for the VM_BIND
operation. VM_BIND operations that use the same bind context can be
assumed, where it matters, to complete in order of submission. No such
assumptions can be made for VM_BIND operations using separate bind contexts.
* ``UMD``: User-mode driver.
* ``KMD``: Kernel-mode driver.
Synchronous / Asynchronous VM_BIND operation
============================================
Synchronous VM_BIND
___________________
With Synchronous VM_BIND, the VM_BIND operations all complete before the
IOCTL returns. A synchronous VM_BIND takes neither in-fences nor
out-fences. Synchronous VM_BIND may block and wait for GPU operations;
for example swap-in or clearing, or even previous binds.
Asynchronous VM_BIND
____________________
Asynchronous VM_BIND accepts both in-syncobjs and out-syncobjs. While the
IOCTL may return immediately, the VM_BIND operations wait for the in-syncobjs
before modifying the GPU page-tables, and signal the out-syncobjs when
the modification is done in the sense that the next exec function that
waits for the out-syncobjs will see the change. Errors are reported
synchronously.
In low-memory situations the implementation may block, performing the
VM_BIND synchronously, because there might not be enough memory
immediately available for preparing the asynchronous operation.
If the VM_BIND IOCTL takes a list or an array of operations as an argument,
the in-syncobjs need to signal before the first operation starts to
execute, and the out-syncobjs signal after the last operation
completes. Operations in the operation list can be assumed, where it
matters, to complete in order.
Since asynchronous VM_BIND operations may use dma-fences embedded in
out-syncobjs and internally in KMD to signal bind completion, any
memory fences given as VM_BIND in-fences need to be awaited
synchronously before the VM_BIND ioctl returns, since dma-fences,
required to signal in a reasonable amount of time, can never be made
to depend on memory fences that don't have such a restriction.
The purpose of an Asynchronous VM_BIND operation is for user-mode
drivers to be able to pipeline interleaved gpu_vm modifications and
exec functions. For long-running workloads, such pipelining of a bind
operation is not allowed and any in-fences need to be awaited
synchronously. The reason for this is twofold. First, any memory
fences gated by a long-running workload and used as in-syncobjs for the
VM_BIND operation will need to be awaited synchronously anyway (see
above). Second, any dma-fences used as in-syncobjs for VM_BIND
operations for long-running workloads will not allow for pipelining
anyway since long-running workloads don't allow for dma-fences as
out-syncobjs, so while theoretically possible the use of them is
questionable and should be rejected until there is a valuable use-case.
Note that this is not a limitation imposed by dma-fence rules, but
rather a limitation imposed to keep KMD implementation simple. It does
not affect using dma-fences as dependencies for the long-running
workload itself, which is allowed by dma-fence rules, but rather for
the VM_BIND operation only.
An asynchronous VM_BIND operation may take substantial time to
complete and signal the out_fence, in particular if the operation is
deeply pipelined behind other VM_BIND operations and workloads
submitted using exec functions. In that case, UMD might want to avoid having a
subsequent VM_BIND operation queued behind the first one if
there are no explicit dependencies. In order to circumvent such a queue-up, a
VM_BIND implementation may allow for VM_BIND contexts to be
created. For each context, VM_BIND operations will be guaranteed to
complete in the order they were submitted, but that is not the case
for VM_BIND operations executing on separate VM_BIND contexts. Instead,
KMD will attempt to execute such VM_BIND operations in parallel, but
with no guarantee that they will actually be executed in
parallel. There may be internal implicit dependencies that only KMD knows
about, for example page-table structure changes. A way to attempt
to avoid such internal dependencies is to have different VM_BIND
contexts use separate regions of a VM.
Also, for VM_BINDs on long-running gpu_vms the user-mode driver should typically
select memory fences as out-fences, since that gives the kernel-mode driver
greater flexibility to inject other operations into the bind /
unbind operations, such as inserting breakpoints into batch
buffers. The workload execution can then easily be pipelined behind
the bind completion using the memory out-fence as the signal condition
for a GPU semaphore embedded by UMD in the workload.
There is no difference in the operations supported or in
multi-operation support between asynchronous VM_BIND and synchronous VM_BIND.
Multi-operation VM_BIND IOCTL error handling and interrupts
===========================================================
The VM_BIND operations of the IOCTL may error for various reasons, for
example due to lack of resources to complete or due to interrupted
waits.
In these situations UMD should preferably restart the IOCTL after
taking suitable action.
If UMD has over-committed a memory resource, an -ENOSPC error will be
returned, and UMD may then unbind resources that are not used at the
moment and rerun the IOCTL. On -EINTR, UMD should simply rerun the
IOCTL and on -ENOMEM user-space may either attempt to free known
system memory resources or fail. In case of UMD deciding to fail a
bind operation, due to an error return, no additional action is needed
to clean up the failed operation, and the VM is left in the same state
as it was before the failing IOCTL.
Unbind operations are guaranteed not to return any errors due to
resource constraints, but may return errors due to, for example,
invalid arguments or the gpu_vm being banned.
In the case an unexpected error happens during the asynchronous bind
process, the gpu_vm will be banned, and attempts to use it after banning
will return -ENOENT.
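As an illustration of the retry policy above (a sketch added for this
document, not part of any driver), the UMD-side handling might look like the
following, where xe_vm_bind_ioctl(), release_unused_buffers() and
free_system_memory_caches() are hypothetical placeholders for the
driver-specific IOCTL wrapper and the UMD's own memory management:
.. code-block:: c
    #include <errno.h>
    #include <stdbool.h>

    struct umd_vm;          /* hypothetical UMD bookkeeping type */
    struct umd_bind_args;   /* hypothetical wrapper around the VM_BIND IOCTL arguments */

    /* Hypothetical helpers, declared only to keep the sketch self-contained.
     * xe_vm_bind_ioctl() is assumed to return 0 or a negative errno value. */
    int xe_vm_bind_ioctl(struct umd_vm *vm, struct umd_bind_args *args);
    bool release_unused_buffers(struct umd_vm *vm);
    bool free_system_memory_caches(void);

    static int submit_vm_bind(struct umd_vm *vm, struct umd_bind_args *args)
    {
        for (;;) {
            int err = xe_vm_bind_ioctl(vm, args);

            if (!err)
                return 0;            /* all operations completed */
            if (err == -EINTR)
                continue;            /* interrupted wait: simply rerun the IOCTL */
            if (err == -ENOSPC && release_unused_buffers(vm))
                continue;            /* unbind currently unused resources, rerun */
            if (err == -ENOMEM && free_system_memory_caches())
                continue;            /* try to free system memory, rerun */
            /* Giving up needs no extra cleanup: the VM is left unchanged. */
            return err;
        }
    }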
Example: The Xe VM_BIND uAPI
============================
Starting with the VM_BIND operation struct, the IOCTL call can take
zero, one or many such operations. A zero number means only the
synchronization part of the IOCTL is carried out: an asynchronous
VM_BIND updates the syncobjects, whereas a sync VM_BIND waits for the
implicit dependencies to be fulfilled.
.. code-block:: c
struct drm_xe_vm_bind_op {
/**
* @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP
*/
__u32 obj;
/** @pad: MBZ */
__u32 pad;
union {
/**
* @obj_offset: Offset into the object for MAP.
*/
__u64 obj_offset;
/** @userptr: user virtual address for MAP_USERPTR */
__u64 userptr;
};
/**
* @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL
*/
__u64 range;
/** @addr: Address to operate on, MBZ for UNMAP_ALL */
__u64 addr;
/**
* @tile_mask: Mask for which tiles to create binds for, 0 == All tiles,
* only applies to creating new VMAs
*/
__u64 tile_mask;
/* Map (parts of) an object into the GPU virtual address range. */
#define XE_VM_BIND_OP_MAP 0x0
/* Unmap a GPU virtual address range */
#define XE_VM_BIND_OP_UNMAP 0x1
/*
* Map a CPU virtual address range into a GPU virtual
* address range.
*/
#define XE_VM_BIND_OP_MAP_USERPTR 0x2
/* Unmap a gem object from the VM. */
#define XE_VM_BIND_OP_UNMAP_ALL 0x3
/*
* Make the backing memory of an address range resident if
* possible. Note that this doesn't pin backing memory.
*/
#define XE_VM_BIND_OP_PREFETCH 0x4
/* Make the GPU map readonly. */
#define XE_VM_BIND_FLAG_READONLY (0x1 << 16)
/*
* Valid on a faulting VM only, do the MAP operation immediately rather
* than deferring the MAP to the page fault handler.
*/
#define XE_VM_BIND_FLAG_IMMEDIATE (0x1 << 17)
/*
* When the NULL flag is set, the page tables are setup with a special
* bit which indicates writes are dropped and all reads return zero. In
* the future, the NULL flags will only be valid for XE_VM_BIND_OP_MAP
* operations, the BO handle MBZ, and the BO offset MBZ. This flag is
* intended to implement VK sparse bindings.
*/
#define XE_VM_BIND_FLAG_NULL (0x1 << 18)
/** @op: Operation to perform (lower 16 bits) and flags (upper 16 bits) */
__u32 op;
/** @mem_region: Memory region to prefetch VMA to, instance not a mask */
__u32 region;
/** @reserved: Reserved */
__u64 reserved[2];
};
The VM_BIND IOCTL argument itself looks as follows. Note that for
synchronous VM_BIND, the num_syncs and syncs fields must be zero. Here
the ``exec_queue_id`` field is the VM_BIND context discussed previously
that is used to facilitate out-of-order VM_BINDs.
.. code-block:: c
struct drm_xe_vm_bind {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
/** @vm_id: The ID of the VM to bind to */
__u32 vm_id;
/**
* @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
* and exec queue must have same vm_id. If zero, the default VM bind engine
* is used.
*/
__u32 exec_queue_id;
/** @num_binds: number of binds in this IOCTL */
__u32 num_binds;
/* If set, perform an async VM_BIND, if clear a sync VM_BIND */
#define XE_VM_BIND_IOCTL_FLAG_ASYNC (0x1 << 0)
/** @flag: Flags controlling all operations in this ioctl. */
__u32 flags;
union {
/** @bind: used if num_binds == 1 */
struct drm_xe_vm_bind_op bind;
/**
* @vector_of_binds: userptr to array of struct
* drm_xe_vm_bind_op if num_binds > 1
*/
__u64 vector_of_binds;
};
/** @num_syncs: amount of syncs to wait for or to signal on completion. */
__u32 num_syncs;
/** @pad2: MBZ */
__u32 pad2;
/** @syncs: pointer to struct drm_xe_sync array */
__u64 syncs;
/** @reserved: Reserved */
__u64 reserved[2];
};
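For completeness, a hedged usage sketch (illustrative only, not the
authoritative uAPI usage) fills the structures above for a single synchronous
MAP. ``DRM_IOCTL_XE_VM_BIND`` is assumed here to be the request macro exposed
by the uAPI header, and the file descriptor, VM id, handle and addresses are
placeholders:
.. code-block:: c
    #include <errno.h>
    #include <stdint.h>
    #include <sys/ioctl.h>

    /* Sketch: map one GEM object synchronously into a gpu_vm. */
    static int xe_map_bo_sync(int fd, uint32_t vm_id, uint32_t bo_handle,
                              uint64_t size, uint64_t gpu_addr)
    {
        struct drm_xe_vm_bind_op op = {
            .obj        = bo_handle,
            .obj_offset = 0,
            .range      = size,
            .addr       = gpu_addr,
            .tile_mask  = 0,                 /* all tiles */
            .op         = XE_VM_BIND_OP_MAP, /* op in lower 16 bits, flags in upper 16 */
        };
        struct drm_xe_vm_bind bind = {
            .vm_id         = vm_id,
            .exec_queue_id = 0,              /* default VM bind engine */
            .num_binds     = 1,
            .flags         = 0,              /* ASYNC flag clear: synchronous VM_BIND */
            .bind          = op,
            .num_syncs     = 0,              /* must be zero for synchronous VM_BIND */
        };

        /* DRM_IOCTL_XE_VM_BIND is an assumed name for the request macro. */
        return ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind) ? -errno : 0;
    }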

View File

@ -0,0 +1,9 @@
.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
===========================================================
Misc DRM driver uAPI- and feature implementation guidelines
===========================================================
.. toctree::
drm-vm-bind-async

View File

@ -18,6 +18,7 @@ GPU Driver Developer's Guide
vga-switcheroo
vgaarbiter
automated_testing
implementation_guidelines
todo
rfc/index

View File

@ -97,8 +97,8 @@ memory fences. Ideally with helper support so people don't get it wrong in all
possible ways.
As a key measurable result, the benefits of ASYNC VM_BIND and a discussion of
various flavors, error handling and a sample API should be documented here or in
a separate document pointed to by this document.
various flavors, error handling and sample API suggestions are documented in
:doc:`The ASYNC VM_BIND document </gpu/drm-vm-bind-async>`.
Userptr integration and vm_bind
-------------------------------

View File

@ -6632,6 +6632,7 @@ S: Maintained
B: https://gitlab.freedesktop.org/drm/msm/-/issues
T: git https://gitlab.freedesktop.org/drm/msm.git
F: Documentation/devicetree/bindings/display/msm/
F: drivers/gpu/drm/ci/xfails/msm*
F: drivers/gpu/drm/msm/
F: include/uapi/drm/msm_drm.h
@ -6783,7 +6784,8 @@ DRM DRIVER FOR SOLOMON SSD130X OLED DISPLAYS
M: Javier Martinez Canillas <javierm@redhat.com>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml
F: Documentation/devicetree/bindings/display/solomon,ssd-common.yaml
F: Documentation/devicetree/bindings/display/solomon,ssd13*.yaml
F: drivers/gpu/drm/solomon/ssd130x*
DRM DRIVER FOR ST-ERICSSON MCDE
@ -6918,6 +6920,7 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
F: Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
F: Documentation/gpu/meson.rst
F: drivers/gpu/drm/ci/xfails/meson*
F: drivers/gpu/drm/meson/
DRM DRIVERS FOR ATMEL HLCDC
@ -7030,6 +7033,7 @@ L: dri-devel@lists.freedesktop.org
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: Documentation/devicetree/bindings/display/mediatek/
F: drivers/gpu/drm/ci/xfails/mediatek*
F: drivers/gpu/drm/mediatek/
F: drivers/phy/mediatek/phy-mtk-dp.c
F: drivers/phy/mediatek/phy-mtk-hdmi*
@ -7070,6 +7074,7 @@ L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/rockchip/
F: drivers/gpu/drm/ci/xfails/rockchip*
F: drivers/gpu/drm/rockchip/
DRM DRIVERS FOR STI
@ -7166,7 +7171,7 @@ F: Documentation/devicetree/bindings/display/xlnx/
F: drivers/gpu/drm/xlnx/
DRM GPU SCHEDULER
M: Luben Tuikov <luben.tuikov@amd.com>
M: Luben Tuikov <ltuikov89@gmail.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
@ -10512,6 +10517,7 @@ C: irc://irc.oftc.net/intel-gfx
T: git git://anongit.freedesktop.org/drm-intel
F: Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon
F: Documentation/gpu/i915.rst
F: drivers/gpu/drm/ci/xfails/i915*
F: drivers/gpu/drm/i915/
F: include/drm/i915*
F: include/uapi/drm/i915_drm.h
@ -13544,7 +13550,7 @@ F: drivers/usb/mtu3/
MEGACHIPS STDPXXXX-GE-B850V3-FW LVDS/DP++ BRIDGES
M: Peter Senna Tschudin <peter.senna@gmail.com>
M: Martin Donnelly <martin.donnelly@ge.com>
M: Ian Ray <ian.ray@ge.com>
M: Martyn Welch <martyn.welch@collabora.co.uk>
S: Maintained
F: Documentation/devicetree/bindings/display/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt
@ -17885,6 +17891,7 @@ C: irc://irc.oftc.net/radeon
T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: Documentation/gpu/amdgpu/
F: drivers/gpu/drm/amd/
F: drivers/gpu/drm/ci/xfails/amd*
F: drivers/gpu/drm/radeon/
F: include/uapi/drm/amdgpu_drm.h
F: include/uapi/drm/radeon_drm.h
@ -22864,6 +22871,7 @@ L: dri-devel@lists.freedesktop.org
L: virtualization@lists.linux-foundation.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/ci/xfails/virtio*
F: drivers/gpu/drm/virtio/
F: include/uapi/linux/virtio_gpu.h

View File

@ -835,6 +835,7 @@ CONFIG_DRM_PANEL_BOE_TV101WUM_NL6=m
CONFIG_DRM_PANEL_LVDS=m
CONFIG_DRM_PANEL_SIMPLE=m
CONFIG_DRM_PANEL_EDP=m
CONFIG_DRM_PANEL_ILITEK_ILI9882T=m
CONFIG_DRM_PANEL_MANTIX_MLAF057WE51=m
CONFIG_DRM_PANEL_RAYDIUM_RM67191=m
CONFIG_DRM_PANEL_SITRONIX_ST7703=m

View File

@ -1,11 +0,0 @@
- Move to threaded_irqs to mitigate potential infinite loop in ivpu_ipc_irq_handler()
- Implement support for BLOB IDs
- Add debugfs support to improve debugging and testing
- Add tracing events for performance debugging
- Implement HW based scheduling support
- Use syncobjs for submit/sync
- Refactor IPC protocol to improve message latency
- Implement BO cache and MADVISE IOCTL
- Add support for user allocated buffers using prime import and dma-buf heaps
- Refactor struct ivpu_bo to use struct drm_gem_shmem_object
- Add driver/device documentation

View File

@ -131,6 +131,22 @@ static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param
return 0;
}
static int ivpu_get_core_clock_rate(struct ivpu_device *vdev, u64 *clk_rate)
{
int ret;
ret = ivpu_rpm_get_if_active(vdev);
if (ret < 0)
return ret;
*clk_rate = ret ? ivpu_hw_reg_pll_freq_get(vdev) : 0;
if (ret)
ivpu_rpm_put(vdev);
return 0;
}
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
@ -154,7 +170,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = vdev->platform;
break;
case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
args->value = ivpu_hw_reg_pll_freq_get(vdev);
ret = ivpu_get_core_clock_rate(vdev, &args->value);
break;
case DRM_IVPU_PARAM_NUM_CONTEXTS:
args->value = ivpu_get_context_count(vdev);

View File

@ -321,13 +321,13 @@ void ivpu_fw_load(struct ivpu_device *vdev)
struct ivpu_fw_info *fw = vdev->fw;
u64 image_end_offset = fw->image_load_offset + fw->image_size;
memset(fw->mem->kvaddr, 0, fw->image_load_offset);
memcpy(fw->mem->kvaddr + fw->image_load_offset,
memset(ivpu_bo_vaddr(fw->mem), 0, fw->image_load_offset);
memcpy(ivpu_bo_vaddr(fw->mem) + fw->image_load_offset,
fw->file->data + FW_FILE_IMAGE_OFFSET, fw->image_size);
if (IVPU_WA(clear_runtime_mem)) {
u8 *start = fw->mem->kvaddr + image_end_offset;
u64 size = fw->mem->base.size - image_end_offset;
u8 *start = ivpu_bo_vaddr(fw->mem) + image_end_offset;
u64 size = ivpu_bo_size(fw->mem) - image_end_offset;
memset(start, 0, size);
}
@ -451,10 +451,10 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
vdev->hw->ranges.global.start;
boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr;
boot_params->ipc_header_area_size = ipc_mem_rx->base.size / 2;
boot_params->ipc_header_area_size = ivpu_bo_size(ipc_mem_rx) / 2;
boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ipc_mem_rx->base.size / 2;
boot_params->ipc_payload_area_size = ipc_mem_rx->base.size / 2;
boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ivpu_bo_size(ipc_mem_rx) / 2;
boot_params->ipc_payload_area_size = ivpu_bo_size(ipc_mem_rx) / 2;
boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
@ -486,9 +486,9 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->trace_destination_mask = vdev->fw->trace_destination_mask;
boot_params->trace_hw_component_mask = vdev->fw->trace_hw_component_mask;
boot_params->crit_tracing_buff_addr = vdev->fw->mem_log_crit->vpu_addr;
boot_params->crit_tracing_buff_size = vdev->fw->mem_log_crit->base.size;
boot_params->crit_tracing_buff_size = ivpu_bo_size(vdev->fw->mem_log_crit);
boot_params->verbose_tracing_buff_addr = vdev->fw->mem_log_verb->vpu_addr;
boot_params->verbose_tracing_buff_size = vdev->fw->mem_log_verb->base.size;
boot_params->verbose_tracing_buff_size = ivpu_bo_size(vdev->fw->mem_log_verb);
boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev);
boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);

View File

@ -31,10 +31,10 @@ static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
{
struct vpu_tracing_buffer_header *log;
if ((*offset + sizeof(*log)) > bo->base.size)
if ((*offset + sizeof(*log)) > ivpu_bo_size(bo))
return -EINVAL;
log = bo->kvaddr + *offset;
log = ivpu_bo_vaddr(bo) + *offset;
if (log->vpu_canary_start != VPU_TRACING_BUFFER_CANARY)
return -EINVAL;
@ -43,7 +43,7 @@ static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
ivpu_dbg(vdev, FW_BOOT, "Invalid header size 0x%x\n", log->header_size);
return -EINVAL;
}
if ((char *)log + log->size > (char *)bo->kvaddr + bo->base.size) {
if ((char *)log + log->size > (char *)ivpu_bo_vaddr(bo) + ivpu_bo_size(bo)) {
ivpu_dbg(vdev, FW_BOOT, "Invalid log size 0x%x\n", log->size);
return -EINVAL;
}

View File

@ -69,7 +69,7 @@ static const struct ivpu_bo_ops prime_ops = {
static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo)
{
int npages = bo->base.size >> PAGE_SHIFT;
int npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
struct page **pages;
pages = drm_gem_get_pages(&bo->base);
@ -88,7 +88,7 @@ static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo)
static void shmem_free_pages_locked(struct ivpu_bo *bo)
{
if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);
set_pages_array_wb(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
drm_gem_put_pages(&bo->base, bo->pages, true, false);
bo->pages = NULL;
@ -96,7 +96,7 @@ static void shmem_free_pages_locked(struct ivpu_bo *bo)
static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo)
{
int npages = bo->base.size >> PAGE_SHIFT;
int npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
struct sg_table *sgt;
int ret;
@ -142,7 +142,7 @@ static const struct ivpu_bo_ops shmem_ops = {
static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo)
{
unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
unsigned int i, npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
struct page **pages;
int ret;
@ -171,10 +171,10 @@ err_free_pages:
static void internal_free_pages_locked(struct ivpu_bo *bo)
{
unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
unsigned int i, npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);
set_pages_array_wb(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
for (i = 0; i < npages; i++)
put_page(bo->pages[i]);
@ -291,7 +291,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
}
mutex_lock(&ctx->lock);
ret = ivpu_mmu_context_insert_node_locked(ctx, range, bo->base.size, &bo->mm_node);
ret = ivpu_mmu_context_insert_node_locked(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
if (!ret) {
bo->ctx = ctx;
bo->vpu_addr = bo->mm_node.start;
@ -438,7 +438,7 @@ static int ivpu_bo_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
ivpu_dbg(vdev, BO, "mmap: ctx %u handle %u vpu_addr 0x%llx size %zu type %s",
bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size, bo->ops->name);
bo->ctx->id, bo->handle, bo->vpu_addr, ivpu_bo_size(bo), bo->ops->name);
if (obj->import_attach) {
/* Drop the reference drm_gem_mmap_obj() acquired.*/
@ -553,7 +553,7 @@ ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
drm_gem_object_put(&bo->base);
ivpu_dbg(vdev, BO, "alloc shmem: ctx %u vpu_addr 0x%llx size %zu flags 0x%x\n",
file_priv->ctx.id, bo->vpu_addr, bo->base.size, bo->flags);
file_priv->ctx.id, bo->vpu_addr, ivpu_bo_size(bo), bo->flags);
return ret;
}
@ -590,22 +590,22 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
goto err_put;
if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT);
drm_clflush_pages(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
if (bo->flags & DRM_IVPU_BO_WC)
set_pages_array_wc(bo->pages, bo->base.size >> PAGE_SHIFT);
set_pages_array_wc(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
else if (bo->flags & DRM_IVPU_BO_UNCACHED)
set_pages_array_uc(bo->pages, bo->base.size >> PAGE_SHIFT);
set_pages_array_uc(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot);
bo->kvaddr = vmap(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT, VM_MAP, prot);
if (!bo->kvaddr) {
ivpu_err(vdev, "Failed to map BO into kernel virtual memory\n");
goto err_put;
}
ivpu_dbg(vdev, BO, "alloc internal: ctx 0 vpu_addr 0x%llx size %zu flags 0x%x\n",
bo->vpu_addr, bo->base.size, flags);
bo->vpu_addr, ivpu_bo_size(bo), flags);
return bo;
@ -718,7 +718,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count);
drm_printf(p, "%5u %6d %16llx %10lu %10u %12lu %14s\n",
bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size,
bo->ctx->id, bo->handle, bo->vpu_addr, ivpu_bo_size(bo),
kref_read(&bo->base.refcount), dma_refcount, bo->ops->name);
}

View File

@ -68,9 +68,19 @@ static inline struct ivpu_bo *to_ivpu_bo(struct drm_gem_object *obj)
return container_of(obj, struct ivpu_bo, base);
}
static inline void *ivpu_bo_vaddr(struct ivpu_bo *bo)
{
return bo->kvaddr;
}
static inline size_t ivpu_bo_size(struct ivpu_bo *bo)
{
return bo->base.size;
}
static inline struct page *ivpu_bo_get_page(struct ivpu_bo *bo, u64 offset)
{
if (offset > bo->base.size || !bo->pages)
if (offset > ivpu_bo_size(bo) || !bo->pages)
return NULL;
return bo->pages[offset / PAGE_SIZE];
@ -107,21 +117,21 @@ static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr)
if (vpu_addr < bo->vpu_addr)
return NULL;
if (vpu_addr >= (bo->vpu_addr + bo->base.size))
if (vpu_addr >= (bo->vpu_addr + ivpu_bo_size(bo)))
return NULL;
return bo->kvaddr + (vpu_addr - bo->vpu_addr);
return ivpu_bo_vaddr(bo) + (vpu_addr - bo->vpu_addr);
}
static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr)
{
if (cpu_addr < bo->kvaddr)
if (cpu_addr < ivpu_bo_vaddr(bo))
return 0;
if (cpu_addr >= (bo->kvaddr + bo->base.size))
if (cpu_addr >= (ivpu_bo_vaddr(bo) + ivpu_bo_size(bo)))
return 0;
return bo->vpu_addr + (cpu_addr - bo->kvaddr);
return bo->vpu_addr + (cpu_addr - ivpu_bo_vaddr(bo));
}
#endif /* __IVPU_GEM_H__ */

View File

@ -68,37 +68,9 @@
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
static char *ivpu_platform_to_str(u32 platform)
{
switch (platform) {
case IVPU_PLATFORM_SILICON:
return "IVPU_PLATFORM_SILICON";
case IVPU_PLATFORM_SIMICS:
return "IVPU_PLATFORM_SIMICS";
case IVPU_PLATFORM_FPGA:
return "IVPU_PLATFORM_FPGA";
default:
return "Invalid platform";
}
}
static void ivpu_hw_read_platform(struct ivpu_device *vdev)
{
u32 gen_ctrl = REGV_RD32(VPU_37XX_HOST_SS_GEN_CTRL);
u32 platform = REG_GET_FLD(VPU_37XX_HOST_SS_GEN_CTRL, PS, gen_ctrl);
if (platform == IVPU_PLATFORM_SIMICS || platform == IVPU_PLATFORM_FPGA)
vdev->platform = platform;
else
vdev->platform = IVPU_PLATFORM_SILICON;
ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
ivpu_platform_to_str(vdev->platform), vdev->platform);
}
static void ivpu_hw_wa_init(struct ivpu_device *vdev)
{
vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
vdev->wa.punit_disabled = false;
vdev->wa.clear_runtime_mem = false;
vdev->wa.d3hot_after_power_off = true;
@ -113,19 +85,11 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
{
if (ivpu_is_simics(vdev) || ivpu_is_fpga(vdev)) {
vdev->timeout.boot = 100000;
vdev->timeout.jsm = 50000;
vdev->timeout.tdr = 2000000;
vdev->timeout.reschedule_suspend = 1000;
vdev->timeout.autosuspend = -1;
} else {
vdev->timeout.boot = 1000;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 2000;
vdev->timeout.reschedule_suspend = 10;
vdev->timeout.autosuspend = 10;
}
vdev->timeout.boot = 1000;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 2000;
vdev->timeout.reschedule_suspend = 10;
vdev->timeout.autosuspend = 10;
}
static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
@ -220,8 +184,7 @@ static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
int ret;
if (IVPU_WA(punit_disabled)) {
ivpu_dbg(vdev, PM, "Skipping PLL request on %s\n",
ivpu_platform_to_str(vdev->platform));
ivpu_dbg(vdev, PM, "Skipping PLL request\n");
return 0;
}
@ -484,10 +447,6 @@ static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
{
/* FPGA model (UPF) is not power aware, skipped Power Island polling */
if (ivpu_is_fpga(vdev))
return 0;
return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
}
@ -632,6 +591,10 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
ivpu_hw_init_range(&hw->ranges.shave, 0x180000000, SZ_2G);
ivpu_hw_init_range(&hw->ranges.dma, 0x200000000, SZ_8G);
vdev->platform = IVPU_PLATFORM_SILICON;
ivpu_hw_wa_init(vdev);
ivpu_hw_timeouts_init(vdev);
return 0;
}
@ -688,10 +651,6 @@ static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
{
int ret;
ivpu_hw_read_platform(vdev);
ivpu_hw_wa_init(vdev);
ivpu_hw_timeouts_init(vdev);
ret = ivpu_hw_37xx_reset(vdev);
if (ret)
ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);

View File

@ -735,6 +735,10 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
ivpu_hw_init_range(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M);
ivpu_hw_init_range(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
ivpu_hw_read_platform(vdev);
ivpu_hw_wa_init(vdev);
ivpu_hw_timeouts_init(vdev);
return 0;
}
@ -826,10 +830,6 @@ static int ivpu_hw_40xx_power_up(struct ivpu_device *vdev)
return ret;
}
ivpu_hw_read_platform(vdev);
ivpu_hw_wa_init(vdev);
ivpu_hw_timeouts_init(vdev);
ret = ivpu_hw_40xx_d0i3_disable(vdev);
if (ret)
ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);

View File

@ -47,22 +47,30 @@
#define REG_TEST_FLD_NUM(REG, FLD, num, val) \
((num) == FIELD_GET(REG##_##FLD##_MASK, val))
#define REGB_POLL(reg, var, cond, timeout_us) \
read_poll_timeout(REGB_RD32_SILENT, var, cond, REG_POLL_SLEEP_US, timeout_us, false, reg)
#define REGV_POLL(reg, var, cond, timeout_us) \
read_poll_timeout(REGV_RD32_SILENT, var, cond, REG_POLL_SLEEP_US, timeout_us, false, reg)
#define REGB_POLL_FLD(reg, fld, val, timeout_us) \
({ \
u32 var; \
REGB_POLL(reg, var, (FIELD_GET(reg##_##fld##_MASK, var) == (val)), timeout_us); \
int r; \
ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s started (expected 0x%x)\n", \
__func__, #reg, reg, #fld, val); \
r = read_poll_timeout(REGB_RD32_SILENT, var, (FIELD_GET(reg##_##fld##_MASK, var) == (val)),\
REG_POLL_SLEEP_US, timeout_us, false, (reg)); \
ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s %s (reg val 0x%08x)\n", \
__func__, #reg, reg, #fld, r ? "ETIMEDOUT" : "OK", var); \
r; \
})
#define REGV_POLL_FLD(reg, fld, val, timeout_us) \
({ \
u32 var; \
REGV_POLL(reg, var, (FIELD_GET(reg##_##fld##_MASK, var) == (val)), timeout_us); \
int r; \
ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s started (expected 0x%x)\n", \
__func__, #reg, reg, #fld, val); \
r = read_poll_timeout(REGV_RD32_SILENT, var, (FIELD_GET(reg##_##fld##_MASK, var) == (val)),\
REG_POLL_SLEEP_US, timeout_us, false, (reg)); \
ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s %s (reg val 0x%08x)\n", \
__func__, #reg, reg, #fld, r ? "ETIMEDOUT" : "OK", var); \
r; \
})
static inline u32
@ -71,7 +79,7 @@ ivpu_hw_reg_rd32(struct ivpu_device *vdev, void __iomem *base, u32 reg,
{
u32 val = readl(base + reg);
ivpu_dbg(vdev, REG, "%s RD: %s (0x%08x) => 0x%08x\n", func, name, reg, val);
ivpu_dbg(vdev, REG, "%s : %s (0x%08x) RD: 0x%08x\n", func, name, reg, val);
return val;
}
@ -81,7 +89,7 @@ ivpu_hw_reg_rd64(struct ivpu_device *vdev, void __iomem *base, u32 reg,
{
u64 val = readq(base + reg);
ivpu_dbg(vdev, REG, "%s RD: %s (0x%08x) => 0x%016llx\n", func, name, reg, val);
ivpu_dbg(vdev, REG, "%s : %s (0x%08x) RD: 0x%016llx\n", func, name, reg, val);
return val;
}
@ -89,7 +97,7 @@ static inline void
ivpu_hw_reg_wr32(struct ivpu_device *vdev, void __iomem *base, u32 reg, u32 val,
const char *name, const char *func)
{
ivpu_dbg(vdev, REG, "%s WR: %s (0x%08x) <= 0x%08x\n", func, name, reg, val);
ivpu_dbg(vdev, REG, "%s : %s (0x%08x) WR: 0x%08x\n", func, name, reg, val);
writel(val, base + reg);
}
@ -97,7 +105,7 @@ static inline void
ivpu_hw_reg_wr64(struct ivpu_device *vdev, void __iomem *base, u32 reg, u64 val,
const char *name, const char *func)
{
ivpu_dbg(vdev, REG, "%s WR: %s (0x%08x) <= 0x%016llx\n", func, name, reg, val);
ivpu_dbg(vdev, REG, "%s : %s (0x%08x) WR: 0x%016llx\n", func, name, reg, val);
writeq(val, base + reg);
}

View File

@ -45,8 +45,9 @@ static void ivpu_jsm_msg_dump(struct ivpu_device *vdev, char *c,
u32 *payload = (u32 *)&jsm_msg->payload;
ivpu_dbg(vdev, JSM,
"%s: vpu:0x%08x (type:0x%x, status:0x%x, id: 0x%x, result: 0x%x, payload:0x%x 0x%x 0x%x 0x%x 0x%x)\n",
c, vpu_addr, jsm_msg->type, jsm_msg->status, jsm_msg->request_id, jsm_msg->result,
"%s: vpu:0x%08x (type:%s, status:0x%x, id: 0x%x, result: 0x%x, payload:0x%x 0x%x 0x%x 0x%x 0x%x)\n",
c, vpu_addr, ivpu_jsm_msg_type_to_str(jsm_msg->type),
jsm_msg->status, jsm_msg->request_id, jsm_msg->result,
payload[0], payload[1], payload[2], payload[3], payload[4]);
}
@ -79,8 +80,8 @@ ivpu_ipc_tx_prepare(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
tx_buf_vpu_addr = gen_pool_alloc(ipc->mm_tx, sizeof(*tx_buf));
if (!tx_buf_vpu_addr) {
ivpu_err(vdev, "Failed to reserve IPC buffer, size %ld\n",
sizeof(*tx_buf));
ivpu_err_ratelimited(vdev, "Failed to reserve IPC buffer, size %ld\n",
sizeof(*tx_buf));
return -ENOMEM;
}
@ -93,12 +94,12 @@ ivpu_ipc_tx_prepare(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
jsm_vpu_addr = tx_buf_vpu_addr + offsetof(struct ivpu_ipc_tx_buf, jsm);
if (tx_buf->ipc.status != IVPU_IPC_HDR_FREE)
ivpu_warn(vdev, "IPC message vpu:0x%x not released by firmware\n",
tx_buf_vpu_addr);
ivpu_warn_ratelimited(vdev, "IPC message vpu:0x%x not released by firmware\n",
tx_buf_vpu_addr);
if (tx_buf->jsm.status != VPU_JSM_MSG_FREE)
ivpu_warn(vdev, "JSM message vpu:0x%x not released by firmware\n",
jsm_vpu_addr);
ivpu_warn_ratelimited(vdev, "JSM message vpu:0x%x not released by firmware\n",
jsm_vpu_addr);
memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->ipc.data_addr = jsm_vpu_addr;
@ -263,18 +264,19 @@ ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req
ret = ivpu_ipc_send(vdev, &cons, req);
if (ret) {
ivpu_warn(vdev, "IPC send failed: %d\n", ret);
ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret);
goto consumer_del;
}
ret = ivpu_ipc_receive(vdev, &cons, NULL, resp, timeout_ms);
if (ret) {
ivpu_warn(vdev, "IPC receive failed: type 0x%x, ret %d\n", req->type, ret);
ivpu_warn_ratelimited(vdev, "IPC receive failed: type %s, ret %d\n",
ivpu_jsm_msg_type_to_str(req->type), ret);
goto consumer_del;
}
if (resp->type != expected_resp_type) {
ivpu_warn(vdev, "Invalid JSM response type: 0x%x\n", resp->type);
ivpu_warn_ratelimited(vdev, "Invalid JSM response type: 0x%x\n", resp->type);
ret = -EBADE;
}
@ -372,13 +374,13 @@ int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
while (ivpu_hw_reg_ipc_rx_count_get(vdev)) {
vpu_addr = ivpu_hw_reg_ipc_rx_addr_get(vdev);
if (vpu_addr == REG_IO_ERROR) {
ivpu_err(vdev, "Failed to read IPC rx addr register\n");
ivpu_err_ratelimited(vdev, "Failed to read IPC rx addr register\n");
return -EIO;
}
ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr);
if (!ipc_hdr) {
ivpu_warn(vdev, "IPC msg 0x%x out of range\n", vpu_addr);
ivpu_warn_ratelimited(vdev, "IPC msg 0x%x out of range\n", vpu_addr);
continue;
}
ivpu_ipc_msg_dump(vdev, "RX", ipc_hdr, vpu_addr);
@ -387,7 +389,8 @@ int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
if (ipc_hdr->channel != IVPU_IPC_CHAN_BOOT_MSG) {
jsm_msg = ivpu_to_cpu_addr(ipc->mem_rx, ipc_hdr->data_addr);
if (!jsm_msg) {
ivpu_warn(vdev, "JSM msg 0x%x out of range\n", ipc_hdr->data_addr);
ivpu_warn_ratelimited(vdev, "JSM msg 0x%x out of range\n",
ipc_hdr->data_addr);
ivpu_ipc_rx_mark_free(vdev, ipc_hdr, NULL);
continue;
}
@ -395,7 +398,8 @@ int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
}
if (atomic_read(&ipc->rx_msg_count) > IPC_MAX_RX_MSG) {
ivpu_warn(vdev, "IPC RX msg dropped, msg count %d\n", IPC_MAX_RX_MSG);
ivpu_warn_ratelimited(vdev, "IPC RX msg dropped, msg count %d\n",
IPC_MAX_RX_MSG);
ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
continue;
}
@ -446,7 +450,7 @@ int ivpu_ipc_init(struct ivpu_device *vdev)
goto err_free_rx;
}
ret = gen_pool_add(ipc->mm_tx, ipc->mem_tx->vpu_addr, ipc->mem_tx->base.size, -1);
ret = gen_pool_add(ipc->mm_tx, ipc->mem_tx->vpu_addr, ivpu_bo_size(ipc->mem_tx), -1);
if (ret) {
ivpu_err(vdev, "gen_pool_add failed, ret %d\n", ret);
goto err_free_rx;
@ -502,8 +506,8 @@ void ivpu_ipc_reset(struct ivpu_device *vdev)
mutex_lock(&ipc->lock);
memset(ipc->mem_tx->kvaddr, 0, ipc->mem_tx->base.size);
memset(ipc->mem_rx->kvaddr, 0, ipc->mem_rx->base.size);
memset(ivpu_bo_vaddr(ipc->mem_tx), 0, ivpu_bo_size(ipc->mem_tx));
memset(ivpu_bo_vaddr(ipc->mem_rx), 0, ivpu_bo_size(ipc->mem_rx));
wmb(); /* Flush WC buffers for TX and RX rings */
mutex_unlock(&ipc->lock);

View File

@ -48,10 +48,10 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 e
goto cmdq_free;
cmdq->db_id = file_priv->ctx.id + engine * ivpu_get_context_count(vdev);
cmdq->entry_count = (u32)((cmdq->mem->base.size - sizeof(struct vpu_job_queue_header)) /
cmdq->entry_count = (u32)((ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header)) /
sizeof(struct vpu_job_queue_entry));
cmdq->jobq = (struct vpu_job_queue *)cmdq->mem->kvaddr;
cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
jobq_header = &cmdq->jobq->header;
jobq_header->engine_idx = engine;
jobq_header->head = 0;
@ -93,7 +93,7 @@ static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16
return cmdq;
ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
cmdq->mem->vpu_addr, cmdq->mem->base.size);
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
if (ret)
return NULL;
@ -453,7 +453,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
return -EBUSY;
}
if (commands_offset >= bo->base.size) {
if (commands_offset >= ivpu_bo_size(bo)) {
ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset);
return -EINVAL;
}

View File

@ -7,6 +7,70 @@
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
{
#define IVPU_CASE_TO_STR(x) case x: return #x
switch (type) {
IVPU_CASE_TO_STR(VPU_JSM_MSG_UNKNOWN);
IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET);
IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT);
IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB);
IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB);
IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB);
IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT);
IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL);
IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL);
IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN);
IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG);
IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG);
IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY);
IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME);
IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START);
IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP);
IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO);
IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP);
IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES);
IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_REGISTER_DB);
IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT);
IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG_RSP);
IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG_RSP);
IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP);
IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME_RSP);
IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION);
IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP);
IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP);
IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP);
IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP);
IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL_RSP);
}
#undef IVPU_CASE_TO_STR
return "Unknown JSM message type";
}
int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
u64 jobq_base, u32 jobq_size)
{
@ -22,7 +86,7 @@ int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret) {
ivpu_err(vdev, "Failed to register doorbell %d: %d\n", db_id, ret);
ivpu_err_ratelimited(vdev, "Failed to register doorbell %d: %d\n", db_id, ret);
return ret;
}
@ -42,7 +106,7 @@ int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret) {
ivpu_warn(vdev, "Failed to unregister doorbell %d: %d\n", db_id, ret);
ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %d: %d\n", db_id, ret);
return ret;
}
@ -65,7 +129,8 @@ int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret) {
ivpu_err(vdev, "Failed to get heartbeat from engine %d: %d\n", engine, ret);
ivpu_err_ratelimited(vdev, "Failed to get heartbeat from engine %d: %d\n",
engine, ret);
return ret;
}
@ -87,7 +152,7 @@ int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_err(vdev, "Failed to reset engine %d: %d\n", engine, ret);
ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);
return ret;
}
@ -107,7 +172,7 @@ int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_err(vdev, "Failed to preempt engine %d: %d\n", engine, ret);
ivpu_err_ratelimited(vdev, "Failed to preempt engine %d: %d\n", engine, ret);
return ret;
}
@ -123,7 +188,8 @@ int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_warn(vdev, "Failed to send command \"%s\": ret %d\n", command, ret);
ivpu_warn_ratelimited(vdev, "Failed to send command \"%s\": ret %d\n",
command, ret);
return ret;
}
@ -138,7 +204,7 @@ int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destinati
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret) {
ivpu_warn(vdev, "Failed to get trace capability: %d\n", ret);
ivpu_warn_ratelimited(vdev, "Failed to get trace capability: %d\n", ret);
return ret;
}
@ -162,7 +228,7 @@ int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 tra
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_warn(vdev, "Failed to set config: %d\n", ret);
ivpu_warn_ratelimited(vdev, "Failed to set config: %d\n", ret);
return ret;
}

View File

@ -8,6 +8,8 @@
#include "vpu_jsm_api.h"
const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type);
int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
u64 jobq_base, u32 jobq_size);
int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id);

View File

@ -18,10 +18,12 @@
#define IVPU_MMU_REG_IDR5 0x00200014u
#define IVPU_MMU_REG_CR0 0x00200020u
#define IVPU_MMU_REG_CR0ACK 0x00200024u
#define IVPU_MMU_REG_CR0ACK_VAL_MASK GENMASK(31, 0)
#define IVPU_MMU_REG_CR1 0x00200028u
#define IVPU_MMU_REG_CR2 0x0020002cu
#define IVPU_MMU_REG_IRQ_CTRL 0x00200050u
#define IVPU_MMU_REG_IRQ_CTRLACK 0x00200054u
#define IVPU_MMU_REG_IRQ_CTRLACK_VAL_MASK GENMASK(31, 0)
#define IVPU_MMU_REG_GERROR 0x00200060u
#define IVPU_MMU_REG_GERROR_CMDQ_MASK BIT_MASK(0)
@ -39,12 +41,13 @@
#define IVPU_MMU_REG_CMDQ_BASE 0x00200090u
#define IVPU_MMU_REG_CMDQ_PROD 0x00200098u
#define IVPU_MMU_REG_CMDQ_CONS 0x0020009cu
#define IVPU_MMU_REG_CMDQ_CONS_VAL_MASK GENMASK(23, 0)
#define IVPU_MMU_REG_CMDQ_CONS_ERR_MASK GENMASK(30, 24)
#define IVPU_MMU_REG_EVTQ_BASE 0x002000a0u
#define IVPU_MMU_REG_EVTQ_PROD 0x002000a8u
#define IVPU_MMU_REG_EVTQ_CONS 0x002000acu
#define IVPU_MMU_REG_EVTQ_PROD_SEC (0x002000a8u + SZ_64K)
#define IVPU_MMU_REG_EVTQ_CONS_SEC (0x002000acu + SZ_64K)
#define IVPU_MMU_REG_CMDQ_CONS_ERR_MASK GENMASK(30, 24)
#define IVPU_MMU_IDR0_REF 0x080f3e0f
#define IVPU_MMU_IDR0_REF_SIMICS 0x080f3e1f
@ -409,19 +412,18 @@ static int ivpu_mmu_structs_alloc(struct ivpu_device *vdev)
return ret;
}
static int ivpu_mmu_reg_write(struct ivpu_device *vdev, u32 reg, u32 val)
static int ivpu_mmu_reg_write_cr0(struct ivpu_device *vdev, u32 val)
{
u32 reg_ack = reg + 4; /* ACK register is 4B after base register */
u32 val_ack;
int ret;
REGV_WR32(IVPU_MMU_REG_CR0, val);
REGV_WR32(reg, val);
return REGV_POLL_FLD(IVPU_MMU_REG_CR0ACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
}
ret = REGV_POLL(reg_ack, val_ack, (val == val_ack), IVPU_MMU_REG_TIMEOUT_US);
if (ret)
ivpu_err(vdev, "Failed to write register 0x%x\n", reg);
static int ivpu_mmu_reg_write_irq_ctrl(struct ivpu_device *vdev, u32 val)
{
REGV_WR32(IVPU_MMU_REG_IRQ_CTRL, val);
return ret;
return REGV_POLL_FLD(IVPU_MMU_REG_IRQ_CTRLACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
}
static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
@ -429,19 +431,26 @@ static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
int ret;
ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_IRQ_CTRL, 0);
ret = ivpu_mmu_reg_write_irq_ctrl(vdev, 0);
if (ret)
return ret;
return ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_IRQ_CTRL, irq_ctrl);
return ivpu_mmu_reg_write_irq_ctrl(vdev, irq_ctrl);
}
static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
{
struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
int ret;
return REGV_POLL(IVPU_MMU_REG_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
IVPU_MMU_QUEUE_TIMEOUT_US);
ret = REGV_POLL_FLD(IVPU_MMU_REG_CMDQ_CONS, VAL, cmdq->prod,
IVPU_MMU_QUEUE_TIMEOUT_US);
if (ret)
return ret;
cmdq->cons = cmdq->prod;
return 0;
}
static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
@ -528,7 +537,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
mmu->evtq.prod = 0;
mmu->evtq.cons = 0;
ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, 0);
ret = ivpu_mmu_reg_write_cr0(vdev, 0);
if (ret)
return ret;
@ -548,7 +557,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
REGV_WR32(IVPU_MMU_REG_CMDQ_CONS, 0);
val = IVPU_MMU_CR0_CMDQEN;
ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
ret = ivpu_mmu_reg_write_cr0(vdev, val);
if (ret)
return ret;
@ -569,12 +578,12 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, 0);
val |= IVPU_MMU_CR0_EVTQEN;
ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
ret = ivpu_mmu_reg_write_cr0(vdev, val);
if (ret)
return ret;
val |= IVPU_MMU_CR0_ATSCHK;
ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
ret = ivpu_mmu_reg_write_cr0(vdev, val);
if (ret)
return ret;
@ -583,7 +592,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
return ret;
val |= IVPU_MMU_CR0_SMMUEN;
return ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
return ivpu_mmu_reg_write_cr0(vdev, val);
}
static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
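
The MMU rework above replaces the shared ivpu_mmu_reg_write() helper with dedicated CR0 and IRQ_CTRL writers built on a field-polling macro, and ivpu_mmu_cmdq_wait_for_cons() now polls the consumer field directly and syncs the cached index afterwards. The underlying pattern is unchanged: write the control register, then poll its ACK register until the hardware mirrors the value back or a timeout expires. An illustrative sketch of that pattern with generic MMIO accessors (names and helpers are placeholders, not the ivpu REGV_* macros):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Write 'val' to a control register, then wait for the paired ACK
 * register to report the same value, polling once per microsecond.
 */
static int write_and_wait_ack(void __iomem *ctrl, void __iomem *ack,
                              u32 val, unsigned int timeout_us)
{
        writel(val, ctrl);

        while (timeout_us--) {
                if (readl(ack) == val)
                        return 0;
                udelay(1);
        }

        return -ETIMEDOUT;
}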

View File

@ -37,7 +37,7 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
struct vpu_boot_params *bp = fw->mem->kvaddr;
struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem);
if (!bp->save_restore_ret_address) {
ivpu_pm_prepare_cold_boot(vdev);
@ -246,6 +246,19 @@ int ivpu_rpm_get(struct ivpu_device *vdev)
return ret;
}
int ivpu_rpm_get_if_active(struct ivpu_device *vdev)
{
int ret;
ivpu_dbg(vdev, RPM, "rpm_get_if_active count %d\n",
atomic_read(&vdev->drm.dev->power.usage_count));
ret = pm_runtime_get_if_active(vdev->drm.dev, false);
drm_WARN_ON(&vdev->drm, ret < 0);
return ret;
}
void ivpu_rpm_put(struct ivpu_device *vdev)
{
pm_runtime_mark_last_busy(vdev->drm.dev);
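
The new ivpu_rpm_get_if_active() helper wraps pm_runtime_get_if_active() so a caller can take a runtime-PM reference only when the NPU is already powered, instead of waking it up. A hedged usage sketch (the calling function is hypothetical): a return value of 0 means the device was suspended and no reference was taken, so the caller must skip both the hardware access and the matching ivpu_rpm_put().

/* Hypothetical caller: only touch registers if the device is already awake. */
static void example_poll_hw(struct ivpu_device *vdev)
{
        if (ivpu_rpm_get_if_active(vdev) <= 0)
                return; /* suspended or error: no reference was taken */

        /* ... hardware access goes here ... */

        ivpu_rpm_put(vdev);
}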

View File

@ -33,6 +33,7 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev);
void ivpu_pm_reset_done_cb(struct pci_dev *pdev);
int __must_check ivpu_rpm_get(struct ivpu_device *vdev);
int __must_check ivpu_rpm_get_if_active(struct ivpu_device *vdev);
void ivpu_rpm_put(struct ivpu_device *vdev);
void ivpu_pm_schedule_recovery(struct ivpu_device *vdev);

View File

@ -2492,6 +2492,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
}
r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
DRM_SCHED_PRIORITY_COUNT,
ring->num_hw_submission, 0,
timeout, adev->reset_domain->wq,
ring->sched_score, ring->name,
@ -2661,6 +2662,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (r)
goto init_failed;
if (adev->mman.buffer_funcs_ring->sched.ready)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
/* Don't init kfd if whole hive need to be reset during init */
if (!adev->gmc.xgmi.pending_reset) {
kgd2kfd_init_zone_device(adev);
@ -3258,6 +3262,8 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
amdgpu_virt_request_full_gpu(adev, false);
}
amdgpu_ttm_set_buffer_funcs_status(adev, false);
r = amdgpu_device_ip_suspend_phase1(adev);
if (r)
return r;
@ -3447,6 +3453,9 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
r = amdgpu_device_ip_resume_phase2(adev);
if (adev->mman.buffer_funcs_ring->sched.ready)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
return r;
}
@ -4234,6 +4243,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
/* disable ras feature must before hw fini */
amdgpu_ras_pre_fini(adev);
amdgpu_ttm_set_buffer_funcs_status(adev, false);
amdgpu_device_ip_fini_early(adev);
amdgpu_irq_fini_hw(adev);
@ -4405,6 +4416,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
amdgpu_ras_suspend(adev);
amdgpu_ttm_set_buffer_funcs_status(adev, false);
amdgpu_device_ip_suspend_phase1(adev);
if (!adev->in_s0ix)
@ -5176,6 +5189,9 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
if (r)
goto out;
if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
if (vram_lost)
amdgpu_device_fill_reset_magic(tmp_adev);
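
Two things change in amdgpu_device.c above: drm_sched_init() now receives the number of run-queues to allocate (DRM_SCHED_PRIORITY_COUNT) as its third argument, and enabling/disabling of SDMA-backed TTM buffer moves is handled centrally in the init/suspend/resume/reset paths rather than inside each SDMA generation. A compact restatement of the gating pattern, using only symbols visible in the hunks above (the wrapper function and the extra NULL check are illustrative additions, not driver code):

/* Advertise SDMA buffer moves to TTM only once the ring's scheduler is ready. */
static void example_enable_buffer_funcs(struct amdgpu_device *adev)
{
        if (adev->mman.buffer_funcs_ring &&
            adev->mman.buffer_funcs_ring->sched.ready)
                amdgpu_ttm_set_buffer_funcs_status(adev, true);
}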

View File

@ -325,8 +325,8 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
int i;
/* Signal all jobs not yet scheduled */
for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
struct drm_sched_rq *rq = &sched->sched_rq[i];
for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list) {
while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
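
This hunk reflects the scheduler change called out in the merge summary: run-queues are no longer a fixed-size array embedded in struct drm_gpu_scheduler but an array of pointers sized by the count passed to drm_sched_init(), so drivers bound their loops by sched->num_rqs and dereference sched->sched_rq[i]. A hedged sketch of the generic iteration, assuming the drm scheduler headers are included (the function itself is illustrative, not a drm_sched API):

/* Walk every run-queue of a scheduler under the variable-size layout. */
static void example_for_each_rq(struct drm_gpu_scheduler *sched)
{
        unsigned int i;

        for (i = 0; i < sched->num_rqs; i++) {
                struct drm_sched_rq *rq = sched->sched_rq[i];

                spin_lock(&rq->lock);
                /* inspect rq->entities here */
                spin_unlock(&rq->lock);
        }
}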

View File

@ -292,27 +292,6 @@ out:
return err;
}
void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
{
struct amdgpu_ring *sdma;
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
if (adev->sdma.has_page_queue) {
sdma = &adev->sdma.instance[i].page;
if (adev->mman.buffer_funcs_ring == sdma) {
amdgpu_ttm_set_buffer_funcs_status(adev, false);
break;
}
}
sdma = &adev->sdma.instance[i].ring;
if (adev->mman.buffer_funcs_ring == sdma) {
amdgpu_ttm_set_buffer_funcs_status(adev, false);
break;
}
}
}
int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev)
{
int err = 0;

View File

@ -169,7 +169,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, u32 instance,
bool duplicate);
void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
bool duplicate);
void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev);
#endif

View File

@ -308,8 +308,6 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
u32 rb_cntl;
int i;
amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
@ -498,9 +496,6 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return 0;

View File

@ -339,8 +339,6 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
u32 rb_cntl, ib_cntl;
int i;
amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
@ -474,9 +472,6 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return 0;

View File

@ -513,8 +513,6 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
u32 rb_cntl, ib_cntl;
int i;
amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
@ -746,9 +744,6 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return 0;

View File

@ -877,8 +877,6 @@ static void sdma_v4_0_gfx_enable(struct amdgpu_device *adev, bool enable)
u32 rb_cntl, ib_cntl;
int i;
amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, enable ? 1 : 0);
@ -913,8 +911,6 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
u32 rb_cntl, ib_cntl;
int i;
amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
@ -1402,13 +1398,7 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
r = amdgpu_ring_test_helper(page);
if (r)
return r;
if (adev->mman.buffer_funcs_ring == page)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return r;
@ -1921,11 +1911,8 @@ static int sdma_v4_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
if (amdgpu_sriov_vf(adev)) {
/* disable the scheduler for SDMA */
amdgpu_sdma_unset_buffer_funcs_helper(adev);
if (amdgpu_sriov_vf(adev))
return 0;
}
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
@ -1964,7 +1951,6 @@ static int sdma_v4_0_resume(void *handle)
if (adev->in_s0ix) {
sdma_v4_0_enable(adev, true);
sdma_v4_0_gfx_enable(adev, true);
amdgpu_ttm_set_buffer_funcs_status(adev, true);
return 0;
}

View File

@ -559,8 +559,6 @@ static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
u32 rb_cntl, ib_cntl;
int i;
amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
@ -825,9 +823,6 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return 0;
@ -1426,11 +1421,8 @@ static int sdma_v5_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev)) {
/* disable the scheduler for SDMA */
amdgpu_sdma_unset_buffer_funcs_helper(adev);
if (amdgpu_sriov_vf(adev))
return 0;
}
sdma_v5_0_ctx_switch_enable(adev, false);
sdma_v5_0_enable(adev, false);

View File

@ -364,8 +364,6 @@ static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
u32 rb_cntl, ib_cntl;
int i;
amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
@ -625,9 +623,6 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return 0;
@ -1284,11 +1279,8 @@ static int sdma_v5_2_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev)) {
/* disable the scheduler for SDMA */
amdgpu_sdma_unset_buffer_funcs_helper(adev);
if (amdgpu_sriov_vf(adev))
return 0;
}
sdma_v5_2_ctx_switch_enable(adev, false);
sdma_v5_2_enable(adev, false);

View File

@ -348,8 +348,6 @@ static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
u32 rb_cntl, ib_cntl;
int i;
amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 0);
@ -561,9 +559,6 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return 0;
@ -1308,11 +1303,8 @@ static int sdma_v6_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev)) {
/* disable the scheduler for SDMA */
amdgpu_sdma_unset_buffer_funcs_helper(adev);
if (amdgpu_sriov_vf(adev))
return 0;
}
sdma_v6_0_ctxempty_int_enable(adev, false);
sdma_v6_0_enable(adev, false);

View File

@ -115,8 +115,6 @@ static void si_dma_stop(struct amdgpu_device *adev)
u32 rb_cntl;
unsigned i;
amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
/* dma0 */
rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
@ -177,9 +175,6 @@ static int si_dma_start(struct amdgpu_device *adev)
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return 0;

View File

@ -9,11 +9,11 @@
bool ast_astdp_is_connected(struct ast_device *ast)
{
if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, ASTDP_MCU_FW_EXECUTING))
if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING))
return false;
if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD))
if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD))
return false;
if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, ASTDP_LINK_SUCCESS))
if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS))
return false;
return true;
}
@ -29,22 +29,22 @@ int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
* CRDF[b0]: DP HPD
* CRE5[b0]: Host reading EDID process is done
*/
if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, ASTDP_MCU_FW_EXECUTING) &&
ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, ASTDP_LINK_SUCCESS) &&
ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD) &&
ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING) &&
ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS) &&
ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD) &&
ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xE5,
ASTDP_HOST_EDID_READ_DONE_MASK))) {
goto err_astdp_edid_not_ready;
}
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5, (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
0x00);
for (i = 0; i < 32; i++) {
/*
* CRE4[7:0]: Read-Pointer for EDID (Unit: 4bytes); valid range: 0~64
*/
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE4,
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE4,
ASTDP_AND_CLEAR_MASK, (u8)i);
j = 0;
@ -52,9 +52,9 @@ int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
* CRD7[b0]: valid flag for EDID
* CRD6[b0]: mirror read pointer for EDID
*/
while ((ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD7,
while ((ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD7,
ASTDP_EDID_VALID_FLAG_MASK) != 0x01) ||
(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD6,
(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD6,
ASTDP_EDID_READ_POINTER_MASK) != i)) {
/*
* Delay are getting longer with each retry.
@ -64,11 +64,11 @@ int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
*/
mdelay(j+1);
if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1,
if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1,
ASTDP_MCU_FW_EXECUTING) &&
ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC,
ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC,
ASTDP_LINK_SUCCESS) &&
ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD))) {
ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD))) {
goto err_astdp_jump_out_loop_of_edid;
}
@ -77,13 +77,13 @@ int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
goto err_astdp_jump_out_loop_of_edid;
}
*(ediddata) = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT,
*(ediddata) = ast_get_index_reg_mask(ast, AST_IO_VGACRI,
0xD8, ASTDP_EDID_READ_DATA_MASK);
*(ediddata + 1) = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD9,
*(ediddata + 1) = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD9,
ASTDP_EDID_READ_DATA_MASK);
*(ediddata + 2) = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDA,
*(ediddata + 2) = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDA,
ASTDP_EDID_READ_DATA_MASK);
*(ediddata + 3) = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDB,
*(ediddata + 3) = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDB,
ASTDP_EDID_READ_DATA_MASK);
if (i == 31) {
@ -103,25 +103,25 @@ int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
ediddata += 4;
}
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5, (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
ASTDP_HOST_EDID_READ_DONE);
return 0;
err_astdp_jump_out_loop_of_edid:
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5,
(u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
ASTDP_HOST_EDID_READ_DONE);
return (~(j+256) + 1);
err_astdp_edid_not_ready:
if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, ASTDP_MCU_FW_EXECUTING)))
if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING)))
return (~0xD1 + 1);
if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, ASTDP_LINK_SUCCESS)))
if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS)))
return (~0xDC + 1);
if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD)))
if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD)))
return (~0xDF + 1);
if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, ASTDP_HOST_EDID_READ_DONE_MASK)))
if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xE5, ASTDP_HOST_EDID_READ_DONE_MASK)))
return (~0xE5 + 1);
return 0;
@ -137,7 +137,7 @@ void ast_dp_launch(struct drm_device *dev)
struct ast_device *ast = to_ast_device(dev);
// Wait one second then timeout.
while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, ASTDP_MCU_FW_EXECUTING) !=
while (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING) !=
ASTDP_MCU_FW_EXECUTING) {
i++;
// wait 100 ms
@ -153,7 +153,7 @@ void ast_dp_launch(struct drm_device *dev)
if (!bDPExecute)
drm_err(dev, "Wait DPMCU executing timeout\n");
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5,
(u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
ASTDP_HOST_EDID_READ_DONE);
}
@ -164,14 +164,14 @@ void ast_dp_power_on_off(struct drm_device *dev, bool on)
{
struct ast_device *ast = to_ast_device(dev);
// Read and Turn off DP PHY sleep
u8 bE3 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, AST_DP_VIDEO_ENABLE);
u8 bE3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, AST_DP_VIDEO_ENABLE);
// Turn on DP PHY sleep
if (!on)
bE3 |= AST_DP_PHY_SLEEP;
// DP Power on/off
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, (u8) ~AST_DP_PHY_SLEEP, bE3);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_PHY_SLEEP, bE3);
}
@ -182,13 +182,13 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
u8 video_on_off = on;
// Video On/Off
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
// If DP plug in and link successful then check video on / off status
if (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, ASTDP_LINK_SUCCESS) &&
ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD)) {
if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS) &&
ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD)) {
video_on_off <<= 4;
while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF,
while (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF,
ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
// wait 1 ms
mdelay(1);
@ -264,8 +264,8 @@ void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mo
* CRE1[7:0]: MISC1 (default: 0x00)
* CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50)
*/
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE0, ASTDP_AND_CLEAR_MASK,
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE0, ASTDP_AND_CLEAR_MASK,
ASTDP_MISC0_24bpp);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE1, ASTDP_AND_CLEAR_MASK, ASTDP_MISC1);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE2, ASTDP_AND_CLEAR_MASK, ModeIdx);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE1, ASTDP_AND_CLEAR_MASK, ASTDP_MISC1);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE2, ASTDP_AND_CLEAR_MASK, ModeIdx);
}
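
Every access above goes through the indexed VGA CRTC register file, now referred to by the symbolic name AST_IO_VGACRI (0x54) from the new ast_reg.h instead of the old AST_IO_CRTC_PORT constant. For readers unfamiliar with indexed VGA registers, a sketch of what a single CRTC write looks like at the MMIO level, built from the ast_io_write8() helper visible elsewhere in this series; the wrapper is hypothetical and assumes the data port directly follows the index port, it is not the driver's ast_set_index_reg():

/* Hypothetical: program CRTC register 'index' by writing the index byte to
 * the index port and the data byte to the port that follows it.
 */
static void example_vgacr_write(struct ast_device *ast, u8 index, u8 val)
{
        ast_io_write8(ast, AST_IO_VGACRI, index);
        ast_io_write8(ast, AST_IO_VGACRI + 1, val);
}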

View File

@ -31,17 +31,17 @@ static int ast_load_dp501_microcode(struct drm_device *dev)
static void send_ack(struct ast_device *ast)
{
u8 sendack;
sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
sendack = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0x9b, 0xff);
sendack |= 0x80;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x9b, 0x00, sendack);
}
static void send_nack(struct ast_device *ast)
{
u8 sendack;
sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
sendack = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0x9b, 0xff);
sendack &= ~0x80;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x9b, 0x00, sendack);
}
static bool wait_ack(struct ast_device *ast)
@ -49,7 +49,7 @@ static bool wait_ack(struct ast_device *ast)
u8 waitack;
u32 retry = 0;
do {
waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff);
waitack = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd2, 0xff);
waitack &= 0x80;
udelay(100);
} while ((!waitack) && (retry++ < 1000));
@ -65,7 +65,7 @@ static bool wait_nack(struct ast_device *ast)
u8 waitack;
u32 retry = 0;
do {
waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff);
waitack = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd2, 0xff);
waitack &= 0x80;
udelay(100);
} while ((waitack) && (retry++ < 1000));
@ -78,12 +78,12 @@ static bool wait_nack(struct ast_device *ast)
static void set_cmd_trigger(struct ast_device *ast)
{
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x40);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x9b, ~0x40, 0x40);
}
static void clear_cmd_trigger(struct ast_device *ast)
{
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x00);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x9b, ~0x40, 0x00);
}
#if 0
@ -92,7 +92,7 @@ static bool wait_fw_ready(struct ast_device *ast)
u8 waitready;
u32 retry = 0;
do {
waitready = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff);
waitready = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd2, 0xff);
waitready &= 0x40;
udelay(100);
} while ((!waitready) && (retry++ < 1000));
@ -110,7 +110,7 @@ static bool ast_write_cmd(struct drm_device *dev, u8 data)
int retry = 0;
if (wait_nack(ast)) {
send_nack(ast);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x9a, 0x00, data);
send_ack(ast);
set_cmd_trigger(ast);
do {
@ -132,7 +132,7 @@ static bool ast_write_data(struct drm_device *dev, u8 data)
if (wait_nack(ast)) {
send_nack(ast);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x9a, 0x00, data);
send_ack(ast);
if (wait_ack(ast)) {
send_nack(ast);
@ -153,7 +153,7 @@ static bool ast_read_data(struct drm_device *dev, u8 *data)
if (wait_ack(ast) == false)
return false;
tmp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd3, 0xff);
tmp = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd3, 0xff);
*data = tmp;
if (wait_nack(ast) == false) {
send_nack(ast);
@ -166,7 +166,7 @@ static bool ast_read_data(struct drm_device *dev, u8 *data)
static void clear_cmd(struct ast_device *ast)
{
send_nack(ast);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, 0x00);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x9a, 0x00, 0x00);
}
#endif
@ -265,9 +265,9 @@ static bool ast_launch_m68k(struct drm_device *dev)
data |= 0x800;
ast_moutdwm(ast, 0x1e6e2040, data);
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xfc); /* D[1:0]: Reserved Video Buffer */
jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0x99, 0xfc); /* D[1:0]: Reserved Video Buffer */
jreg |= 0x02;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x99, jreg);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x99, jreg);
}
return true;
}
@ -354,7 +354,7 @@ static bool ast_init_dvo(struct drm_device *dev)
ast_write32(ast, 0xf000, 0x1);
ast_write32(ast, 0x12000, 0x1688a8a8);
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
if (!(jreg & 0x80)) {
/* Init SCU DVO Settings */
data = ast_read32(ast, 0x12008);
@ -413,7 +413,7 @@ static bool ast_init_dvo(struct drm_device *dev)
ast_write32(ast, 0x1202c, data);
/* Init VGA DVO Settings */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
return true;
}
@ -442,7 +442,7 @@ static void ast_init_analog(struct drm_device *dev)
ast_write32(ast, 0, data);
/* Disable DVO */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x00);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x00);
}
void ast_init_3rdtx(struct drm_device *dev)
@ -451,7 +451,7 @@ void ast_init_3rdtx(struct drm_device *dev)
u8 jreg;
if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast)) {
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff);
switch (jreg & 0x0e) {
case 0x04:
ast_init_dvo(dev);

View File

@ -39,6 +39,8 @@
#include <drm/drm_mode.h>
#include <drm/drm_framebuffer.h>
#include "ast_reg.h"
#define DRIVER_AUTHOR "Dave Airlie"
#define DRIVER_NAME "ast"
@ -259,25 +261,6 @@ static inline bool __ast_gen_is_eq(struct ast_device *ast, unsigned long gen)
#define IS_AST_GEN6(__ast) __ast_gen_is_eq(__ast, 6)
#define IS_AST_GEN7(__ast) __ast_gen_is_eq(__ast, 7)
#define AST_IO_AR_PORT_WRITE (0x40)
#define AST_IO_MISC_PORT_WRITE (0x42)
#define AST_IO_VGA_ENABLE_PORT (0x43)
#define AST_IO_SEQ_PORT (0x44)
#define AST_IO_DAC_INDEX_READ (0x47)
#define AST_IO_DAC_INDEX_WRITE (0x48)
#define AST_IO_DAC_DATA (0x49)
#define AST_IO_GR_PORT (0x4E)
#define AST_IO_CRTC_PORT (0x54)
#define AST_IO_INPUT_STATUS1_READ (0x5A)
#define AST_IO_MISC_PORT_READ (0x4C)
#define AST_IO_MM_OFFSET (0x380)
#define AST_IO_VGAIR1_VREFRESH BIT(3)
#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
static inline u32 ast_read32(struct ast_device *ast, u32 reg)
{
return ioread32(ast->regs + reg);
@ -399,71 +382,9 @@ int ast_mode_config_init(struct ast_device *ast);
#define AST_DP501_LINKRATE 0xf014
#define AST_DP501_EDID_DATA 0xf020
/*
* Display Transmitter Type:
*/
#define TX_TYPE_MASK GENMASK(3, 1)
#define NO_TX (0 << 1)
#define ITE66121_VBIOS_TX (1 << 1)
#define SI164_VBIOS_TX (2 << 1)
#define CH7003_VBIOS_TX (3 << 1)
#define DP501_VBIOS_TX (4 << 1)
#define ANX9807_VBIOS_TX (5 << 1)
#define TX_FW_EMBEDDED_FW_TX (6 << 1)
#define ASTDP_DPMCU_TX (7 << 1)
#define AST_VRAM_INIT_STATUS_MASK GENMASK(7, 6)
//#define AST_VRAM_INIT_BY_BMC BIT(7)
//#define AST_VRAM_INIT_READY BIT(6)
/* Define for Soc scratched reg used on ASTDP */
#define AST_DP_PHY_SLEEP BIT(4)
#define AST_DP_VIDEO_ENABLE BIT(0)
#define AST_DP_POWER_ON true
#define AST_DP_POWER_OFF false
/*
* CRD1[b5]: DP MCU FW is executing
* CRDC[b0]: DP link success
* CRDF[b0]: DP HPD
* CRE5[b0]: Host reading EDID process is done
*/
#define ASTDP_MCU_FW_EXECUTING BIT(5)
#define ASTDP_LINK_SUCCESS BIT(0)
#define ASTDP_HPD BIT(0)
#define ASTDP_HOST_EDID_READ_DONE BIT(0)
#define ASTDP_HOST_EDID_READ_DONE_MASK GENMASK(0, 0)
/*
* CRB8[b1]: Enable VSYNC off
* CRB8[b0]: Enable HSYNC off
*/
#define AST_DPMS_VSYNC_OFF BIT(1)
#define AST_DPMS_HSYNC_OFF BIT(0)
/*
* CRDF[b4]: Mirror of AST_DP_VIDEO_ENABLE
* Precondition: A. ~AST_DP_PHY_SLEEP &&
* B. DP_HPD &&
* C. DP_LINK_SUCCESS
*/
#define ASTDP_MIRROR_VIDEO_ENABLE BIT(4)
#define ASTDP_EDID_READ_POINTER_MASK GENMASK(7, 0)
#define ASTDP_EDID_VALID_FLAG_MASK GENMASK(0, 0)
#define ASTDP_EDID_READ_DATA_MASK GENMASK(7, 0)
/*
* ASTDP setmode registers:
* CRE0[7:0]: MISC0 ((0x00: 18-bpp) or (0x20: 24-bpp)
* CRE1[7:0]: MISC1 (default: 0x00)
* CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50)
*/
#define ASTDP_MISC0_24bpp BIT(5)
#define ASTDP_MISC1 0
#define ASTDP_AND_CLEAR_MASK 0x00
/*
* ASTDP resoultion table:
* EX: ASTDP_A_B_C:

View File

@ -35,8 +35,8 @@ static void ast_i2c_setsda(void *i2c_priv, int data)
for (i = 0; i < 0x10000; i++) {
ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0xf1, ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x04);
if (ujcrb7 == jtemp)
break;
}
@ -51,8 +51,8 @@ static void ast_i2c_setscl(void *i2c_priv, int clock)
for (i = 0; i < 0x10000; i++) {
ujcrb7 = ((clock & 0x01) ? 0 : 1);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0xf4, ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x01);
if (ujcrb7 == jtemp)
break;
}
@ -66,14 +66,14 @@ static int ast_i2c_getsda(void *i2c_priv)
count = 0;
pass = 0;
val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
val = (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x20) >> 5) & 0x01;
do {
val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
val2 = (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x20) >> 5) & 0x01;
if (val == val2) {
pass++;
} else {
pass = 0;
val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
val = (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x20) >> 5) & 0x01;
}
} while ((pass < 5) && (count++ < 0x10000));
@ -88,14 +88,14 @@ static int ast_i2c_getscl(void *i2c_priv)
count = 0;
pass = 0;
val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
val = (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x10) >> 4) & 0x01;
do {
val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
val2 = (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x10) >> 4) & 0x01;
if (val == val2) {
pass++;
} else {
pass = 0;
val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
val = (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x10) >> 4) & 0x01;
}
} while ((pass < 5) && (count++ < 0x10000));
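
The getsda/getscl paths above sample the bit-banged I2C lines through VGACRB7 and only accept a value once five consecutive reads agree, which filters glitches on the slow GPIO-style lines. The same debounce idea in isolation, as a generic sketch in plain C with no ast helpers:

/* Sample a line via 'sample(priv)' until five consecutive reads agree,
 * or a retry budget is exhausted; returns the last stable value.
 */
static int read_debounced(int (*sample)(void *priv), void *priv)
{
        int val = sample(priv);
        int pass = 0;
        unsigned int count = 0;

        do {
                int val2 = sample(priv);

                if (val == val2) {
                        pass++;
                } else {
                        pass = 0;
                        val = val2;
                }
        } while (pass < 5 && count++ < 0x10000);

        return val;
}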

View File

@ -40,7 +40,7 @@ static bool ast_is_vga_enabled(struct drm_device *dev)
struct ast_device *ast = to_ast_device(dev);
u8 ch;
ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
ch = ast_io_read8(ast, AST_IO_VGAER);
return !!(ch & 0x01);
}
@ -49,8 +49,8 @@ static void ast_enable_vga(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
ast_io_write8(ast, AST_IO_VGA_ENABLE_PORT, 0x01);
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, 0x01);
ast_io_write8(ast, AST_IO_VGAER, 0x01);
ast_io_write8(ast, AST_IO_VGAMR_W, 0x01);
}
/*
@ -62,21 +62,21 @@ static void ast_enable_mmio_release(void *data)
struct ast_device *ast = data;
/* enable standard VGA decode */
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x04);
}
static int ast_enable_mmio(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x06);
return devm_add_action_or_reset(dev->dev, ast_enable_mmio_release, ast);
}
static void ast_open_key(struct ast_device *ast)
{
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x80, 0xA8);
}
static int ast_device_config_init(struct ast_device *ast)
@ -105,8 +105,8 @@ static int ast_device_config_init(struct ast_device *ast)
* is disabled. We force using P2A if VGA only mode bit
* is set D[7]
*/
jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
jregd0 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
jregd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff);
if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
/*
@ -219,7 +219,7 @@ static void ast_detect_widescreen(struct ast_device *ast)
ast->support_wide_screen = false;
break;
default:
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
if (!(jreg & 0x80))
ast->support_wide_screen = true;
else if (jreg & 0x01)
@ -256,7 +256,7 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
* SIL164 when there is none.
*/
if (!need_post) {
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff);
jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff);
if (jreg & 0x80)
ast->tx_chip_types = AST_TX_SIL164_BIT;
}
@ -267,7 +267,7 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
* the SOC scratch register #1 bits 11:8 (interestingly marked
* as "reserved" in the spec)
*/
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff);
switch (jreg) {
case 0x04:
ast->tx_chip_types = AST_TX_SIL164_BIT;
@ -286,7 +286,7 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
ast->tx_chip_types = AST_TX_DP501_BIT;
}
} else if (IS_AST_GEN7(ast)) {
if (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, TX_TYPE_MASK) ==
if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, TX_TYPE_MASK) ==
ASTDP_DPMCU_TX) {
ast->tx_chip_types = AST_TX_ASTDP_BIT;
ast_dp_launch(&ast->base);
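
On Gen7 parts the transmitter type is read from SoC scratch register CRD1 and compared against ASTDP_DPMCU_TX, as the last hunk shows. Restated as a tiny predicate using only symbols present in the diff (the wrapper name is hypothetical):

/* Hypothetical helper: true when video is routed through the ASTDP MCU. */
static bool example_has_astdp(struct ast_device *ast)
{
        return ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, TX_TYPE_MASK) ==
               ASTDP_DPMCU_TX;
}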

View File

@ -39,7 +39,7 @@ static u32 ast_get_vram_size(struct ast_device *ast)
u32 vram_size;
vram_size = AST_VIDMEM_DEFAULT_SIZE;
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff);
jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xaa, 0xff);
switch (jreg & 3) {
case 0:
vram_size = AST_VIDMEM_SIZE_8M;
@ -55,7 +55,7 @@ static u32 ast_get_vram_size(struct ast_device *ast)
break;
}
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xff);
jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0x99, 0xff);
switch (jreg & 0x03) {
case 1:
vram_size -= 0x100000;

View File

@ -55,14 +55,14 @@ static inline void ast_load_palette_index(struct ast_device *ast,
u8 index, u8 red, u8 green,
u8 blue)
{
ast_io_write8(ast, AST_IO_DAC_INDEX_WRITE, index);
ast_io_read8(ast, AST_IO_SEQ_PORT);
ast_io_write8(ast, AST_IO_DAC_DATA, red);
ast_io_read8(ast, AST_IO_SEQ_PORT);
ast_io_write8(ast, AST_IO_DAC_DATA, green);
ast_io_read8(ast, AST_IO_SEQ_PORT);
ast_io_write8(ast, AST_IO_DAC_DATA, blue);
ast_io_read8(ast, AST_IO_SEQ_PORT);
ast_io_write8(ast, AST_IO_VGADWR, index);
ast_io_read8(ast, AST_IO_VGASRI);
ast_io_write8(ast, AST_IO_VGAPDR, red);
ast_io_read8(ast, AST_IO_VGASRI);
ast_io_write8(ast, AST_IO_VGAPDR, green);
ast_io_read8(ast, AST_IO_VGASRI);
ast_io_write8(ast, AST_IO_VGAPDR, blue);
ast_io_read8(ast, AST_IO_VGASRI);
}
static void ast_crtc_set_gamma_linear(struct ast_device *ast,
@ -253,13 +253,13 @@ static void ast_set_vbios_color_reg(struct ast_device *ast,
return;
}
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, (u8)((color_index & 0x0f) << 4));
ast_set_index_reg(ast, AST_IO_VGACRI, 0x8c, (u8)((color_index & 0x0f) << 4));
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x91, 0x00);
if (vbios_mode->enh_table->flags & NewModeInfo) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, format->cpp[0] * 8);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x91, 0xa8);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x92, format->cpp[0] * 8);
}
}
@ -272,18 +272,18 @@ static void ast_set_vbios_mode_reg(struct ast_device *ast,
refresh_rate_index = vbios_mode->enh_table->refresh_rate_index;
mode_id = vbios_mode->enh_table->mode_id;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x8d, refresh_rate_index & 0xff);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x8e, mode_id & 0xff);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x91, 0x00);
if (vbios_mode->enh_table->flags & NewModeInfo) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x91, 0xa8);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x93, adjusted_mode->clock / 1000);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x94, adjusted_mode->crtc_hdisplay);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x95, adjusted_mode->crtc_hdisplay >> 8);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x96, adjusted_mode->crtc_vdisplay);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x97, adjusted_mode->crtc_vdisplay >> 8);
}
}
@ -298,41 +298,41 @@ static void ast_set_std_reg(struct ast_device *ast,
stdtable = vbios_mode->std_table;
jreg = stdtable->misc;
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
ast_io_write8(ast, AST_IO_VGAMR_W, jreg);
/* Set SEQ; except Screen Disable field */
ast_set_index_reg(ast, AST_IO_SEQ_PORT, 0x00, 0x03);
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, stdtable->seq[0]);
ast_set_index_reg(ast, AST_IO_VGASRI, 0x00, 0x03);
ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x01, 0xdf, stdtable->seq[0]);
for (i = 1; i < 4; i++) {
jreg = stdtable->seq[i];
ast_set_index_reg(ast, AST_IO_SEQ_PORT, (i + 1), jreg);
ast_set_index_reg(ast, AST_IO_VGASRI, (i + 1), jreg);
}
/* Set CRTC; except base address and offset */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x11, 0x7f, 0x00);
for (i = 0; i < 12; i++)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
ast_set_index_reg(ast, AST_IO_VGACRI, i, stdtable->crtc[i]);
for (i = 14; i < 19; i++)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
ast_set_index_reg(ast, AST_IO_VGACRI, i, stdtable->crtc[i]);
for (i = 20; i < 25; i++)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
ast_set_index_reg(ast, AST_IO_VGACRI, i, stdtable->crtc[i]);
/* set AR */
jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
jreg = ast_io_read8(ast, AST_IO_VGAIR1_R);
for (i = 0; i < 20; i++) {
jreg = stdtable->ar[i];
ast_io_write8(ast, AST_IO_AR_PORT_WRITE, (u8)i);
ast_io_write8(ast, AST_IO_AR_PORT_WRITE, jreg);
ast_io_write8(ast, AST_IO_VGAARI_W, (u8)i);
ast_io_write8(ast, AST_IO_VGAARI_W, jreg);
}
ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x14);
ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x00);
ast_io_write8(ast, AST_IO_VGAARI_W, 0x14);
ast_io_write8(ast, AST_IO_VGAARI_W, 0x00);
jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x20);
jreg = ast_io_read8(ast, AST_IO_VGAIR1_R);
ast_io_write8(ast, AST_IO_VGAARI_W, 0x20);
/* Set GR */
for (i = 0; i < 9; i++)
ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]);
ast_set_index_reg(ast, AST_IO_VGAGRI, i, stdtable->gr[i]);
}
static void ast_set_crtc_reg(struct ast_device *ast,
@ -346,48 +346,48 @@ static void ast_set_crtc_reg(struct ast_device *ast,
(vbios_mode->enh_table->flags & AST2500PreCatchCRT))
precache = 40;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x11, 0x7f, 0x00);
temp = (mode->crtc_htotal >> 3) - 5;
if (temp & 0x100)
jregAC |= 0x01; /* HT D[8] */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x00, 0x00, temp);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x00, 0x00, temp);
temp = (mode->crtc_hdisplay >> 3) - 1;
if (temp & 0x100)
jregAC |= 0x04; /* HDE D[8] */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x01, 0x00, temp);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x01, 0x00, temp);
temp = (mode->crtc_hblank_start >> 3) - 1;
if (temp & 0x100)
jregAC |= 0x10; /* HBS D[8] */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x02, 0x00, temp);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x02, 0x00, temp);
temp = ((mode->crtc_hblank_end >> 3) - 1) & 0x7f;
if (temp & 0x20)
jreg05 |= 0x80; /* HBE D[5] */
if (temp & 0x40)
jregAD |= 0x01; /* HBE D[5] */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x03, 0xE0, (temp & 0x1f));
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x03, 0xE0, (temp & 0x1f));
temp = ((mode->crtc_hsync_start-precache) >> 3) - 1;
if (temp & 0x100)
jregAC |= 0x40; /* HRS D[5] */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x04, 0x00, temp);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x04, 0x00, temp);
temp = (((mode->crtc_hsync_end-precache) >> 3) - 1) & 0x3f;
if (temp & 0x20)
jregAD |= 0x04; /* HRE D[5] */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05));
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05));
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAC, 0x00, jregAC);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAC, 0x00, jregAC);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAD, 0x00, jregAD);
// Workaround for HSync Time non octave pixels (1920x1080@60Hz HSync 44 pixels);
if (IS_AST_GEN7(ast) && (mode->crtc_vdisplay == 1080))
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x02);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xFC, 0xFD, 0x02);
else
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x00);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xFC, 0xFD, 0x00);
/* vert timings */
temp = (mode->crtc_vtotal) - 2;
@ -397,7 +397,7 @@ static void ast_set_crtc_reg(struct ast_device *ast,
jreg07 |= 0x20;
if (temp & 0x400)
jregAE |= 0x01;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x06, 0x00, temp);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x06, 0x00, temp);
temp = (mode->crtc_vsync_start) - 1;
if (temp & 0x100)
@ -406,14 +406,14 @@ static void ast_set_crtc_reg(struct ast_device *ast,
jreg07 |= 0x80;
if (temp & 0x400)
jregAE |= 0x08;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x10, 0x00, temp);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x10, 0x00, temp);
temp = (mode->crtc_vsync_end - 1) & 0x3f;
if (temp & 0x10)
jregAE |= 0x20;
if (temp & 0x20)
jregAE |= 0x40;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x70, temp & 0xf);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x11, 0x70, temp & 0xf);
temp = mode->crtc_vdisplay - 1;
if (temp & 0x100)
@ -422,7 +422,7 @@ static void ast_set_crtc_reg(struct ast_device *ast,
jreg07 |= 0x40;
if (temp & 0x400)
jregAE |= 0x02;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x12, 0x00, temp);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x12, 0x00, temp);
temp = mode->crtc_vblank_start - 1;
if (temp & 0x100)
@ -431,23 +431,23 @@ static void ast_set_crtc_reg(struct ast_device *ast,
jreg09 |= 0x20;
if (temp & 0x400)
jregAE |= 0x04;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x15, 0x00, temp);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x15, 0x00, temp);
temp = mode->crtc_vblank_end - 1;
if (temp & 0x100)
jregAE |= 0x10;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x16, 0x00, temp);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x16, 0x00, temp);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x07, 0x00, jreg07);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x09, 0xdf, jreg09);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAE, 0x00, (jregAE | 0x80));
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x07, 0x00, jreg07);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x09, 0xdf, jreg09);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAE, 0x00, (jregAE | 0x80));
if (precache)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0x3f, 0x80);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0x3f, 0x80);
else
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0x3f, 0x00);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0x3f, 0x00);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x11, 0x7f, 0x80);
}
static void ast_set_offset_reg(struct ast_device *ast,
@ -456,8 +456,8 @@ static void ast_set_offset_reg(struct ast_device *ast,
u16 offset;
offset = fb->pitches[0] >> 3;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x13, (offset & 0xff));
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x13, (offset & 0xff));
ast_set_index_reg(ast, AST_IO_VGACRI, 0xb0, (offset >> 8) & 0x3f);
}
static void ast_set_dclk_reg(struct ast_device *ast,
@ -471,9 +471,9 @@ static void ast_set_dclk_reg(struct ast_device *ast,
else
clk_info = &dclk_table[vbios_mode->enh_table->dclk_index];
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc0, 0x00, clk_info->param1);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc1, 0x00, clk_info->param2);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xbb, 0x0f,
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc0, 0x00, clk_info->param1);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc1, 0x00, clk_info->param2);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xbb, 0x0f,
(clk_info->param3 & 0xc0) |
((clk_info->param3 & 0x3) << 4));
}
@ -502,26 +502,26 @@ static void ast_set_color_reg(struct ast_device *ast,
break;
}
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa0, 0x8f, jregA0);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xf0, jregA3);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa0, 0x8f, jregA0);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xf0, jregA3);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa8, 0xfd, jregA8);
}
static void ast_set_crtthd_reg(struct ast_device *ast)
{
/* Set Threshold */
if (IS_AST_GEN7(ast)) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0xe0);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0xa0);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0xe0);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0xa0);
} else if (IS_AST_GEN6(ast) || IS_AST_GEN5(ast) || IS_AST_GEN4(ast)) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x78);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x60);
} else if (IS_AST_GEN3(ast) || IS_AST_GEN2(ast)) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x3f);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x2f);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x3f);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x2f);
} else {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x2f);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x1f);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x2f);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x1f);
}
}
@ -531,13 +531,13 @@ static void ast_set_sync_reg(struct ast_device *ast,
{
u8 jreg;
jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ);
jreg = ast_io_read8(ast, AST_IO_VGAMR_R);
jreg &= ~0xC0;
if (vbios_mode->enh_table->flags & NVSync)
jreg |= 0x80;
if (vbios_mode->enh_table->flags & NHSync)
jreg |= 0x40;
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
ast_io_write8(ast, AST_IO_VGAMR_W, jreg);
}
static void ast_set_start_address_crt1(struct ast_device *ast,
@ -546,9 +546,9 @@ static void ast_set_start_address_crt1(struct ast_device *ast,
u32 addr;
addr = offset >> 2;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0d, (u8)(addr & 0xff));
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0c, (u8)((addr >> 8) & 0xff));
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xaf, (u8)((addr >> 16) & 0xff));
ast_set_index_reg(ast, AST_IO_VGACRI, 0x0d, (u8)(addr & 0xff));
ast_set_index_reg(ast, AST_IO_VGACRI, 0x0c, (u8)((addr >> 8) & 0xff));
ast_set_index_reg(ast, AST_IO_VGACRI, 0xaf, (u8)((addr >> 16) & 0xff));
}
@ -558,7 +558,7 @@ static void ast_wait_for_vretrace(struct ast_device *ast)
u8 vgair1;
do {
vgair1 = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
vgair1 = ast_io_read8(ast, AST_IO_VGAIR1_R);
} while (!(vgair1 & AST_IO_VGAIR1_VREFRESH) && time_before(jiffies, timeout));
}
@ -689,7 +689,7 @@ static void ast_primary_plane_helper_atomic_enable(struct drm_plane *plane,
* Therefore only reprogram the address after enabling the plane.
*/
ast_set_start_address_crt1(ast, (u32)ast_plane->offset);
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x1, 0xdf, 0x00);
}
static void ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
@ -697,7 +697,7 @@ static void ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
{
struct ast_device *ast = to_ast_device(plane->dev);
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x1, 0xdf, 0x20);
}
static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = {
@ -814,9 +814,9 @@ static void ast_set_cursor_base(struct ast_device *ast, u64 address)
u8 addr1 = (address >> 11) & 0xff;
u8 addr2 = (address >> 19) & 0xff;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, addr0);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, addr1);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xc8, addr0);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xc9, addr1);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xca, addr2);
}
static void ast_set_cursor_location(struct ast_device *ast, u16 x, u16 y,
@ -827,12 +827,12 @@ static void ast_set_cursor_location(struct ast_device *ast, u16 x, u16 y,
u8 y0 = (y & 0x00ff);
u8 y1 = (y & 0x0700) >> 8;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, x0);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, x1);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, y0);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, y1);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xc2, x_offset);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xc3, y_offset);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xc4, x0);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xc5, x1);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xc6, y0);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xc7, y1);
}
static void ast_set_cursor_enabled(struct ast_device *ast, bool enabled)
@ -845,7 +845,7 @@ static void ast_set_cursor_enabled(struct ast_device *ast, bool enabled)
if (enabled)
vgacrcb |= AST_IO_VGACRCB_HWC_ENABLED;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, mask, vgacrcb);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xcb, mask, vgacrcb);
}
static const uint32_t ast_cursor_plane_formats[] = {
@ -1014,8 +1014,8 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
*/
switch (mode) {
case DRM_MODE_DPMS_ON:
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, 0);
ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x01, 0xdf, 0);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xfc, 0);
if (ast->tx_chip_types & AST_TX_DP501_BIT)
ast_set_dp501_video_output(crtc->dev, 1);
@ -1051,8 +1051,8 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
ast_dp_power_on_off(crtc->dev, AST_DP_POWER_OFF);
}
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0x20);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, ch);
ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x01, 0xdf, 0x20);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xfc, ch);
break;
}
}
@ -1086,7 +1086,7 @@ ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode
return MODE_OK;
if ((mode->hdisplay == 1920) && (mode->vdisplay == 1200)) {
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
jtemp = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff);
if (jtemp & 0x01)
return MODE_NOMODE;
else
@ -1219,7 +1219,7 @@ static void ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atom
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
ast_set_vbios_mode_reg(ast, adjusted_mode, vbios_mode_info);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x06);
ast_set_std_reg(ast, adjusted_mode, vbios_mode_info);
ast_set_crtc_reg(ast, adjusted_mode, vbios_mode_info);
ast_set_dclk_reg(ast, adjusted_mode, vbios_mode_info);
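
Several hunks in this file toggle bit 5 (0x20) of Sequencer register 1 through index port AST_IO_VGASRI to blank or unblank the screen: the primary-plane enable/disable callbacks and the DPMS handler all perform the same masked write. A compact restatement of that single operation; the wrapper is hypothetical, while the helper, constant and mask are taken from the hunks above:

/* Hypothetical wrapper around the SR01 screen-off bit (0x20). */
static void example_set_screen_off(struct ast_device *ast, bool off)
{
        ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x01, 0xdf, off ? 0x20 : 0x00);
}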

View File

@ -49,7 +49,7 @@ ast_set_def_ext_reg(struct drm_device *dev)
/* reset scratch */
for (i = 0x81; i <= 0x9f; i++)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00);
ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00);
if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast))
ext_reg_info = extreginfo_ast2300;
@ -58,23 +58,23 @@ ast_set_def_ext_reg(struct drm_device *dev)
index = 0xa0;
while (*ext_reg_info != 0xff) {
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, index, 0x00, *ext_reg_info);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info);
index++;
ext_reg_info++;
}
/* disable standard IO/MEM decode if secondary */
/* ast_set_index_reg-mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x3); */
/* ast_set_index_reg-mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */
/* Set Ext. Default */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x8c, 0x00, 0x01);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x00, 0x00);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00);
/* Enable RAMDAC for A1 */
reg = 0x04;
if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast))
reg |= 0x20;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg);
}
u32 ast_mindwm(struct ast_device *ast, u32 r)
@ -245,7 +245,7 @@ static void ast_init_dram_reg(struct drm_device *dev)
u32 data, temp, i;
const struct ast_dramstruct *dram_reg_info;
j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
if ((j & 0x80) == 0) { /* VGA only */
if (IS_AST_GEN1(ast)) {
@ -325,7 +325,7 @@ static void ast_init_dram_reg(struct drm_device *dev)
/* wait ready */
do {
j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
} while ((j & 0x40) == 0);
}
@ -349,7 +349,7 @@ void ast_post_gpu(struct drm_device *dev)
ast_init_3rdtx(dev);
} else {
if (ast->tx_chip_types & AST_TX_SIL164_BIT)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); /* Enable DVO */
}
}
@ -1562,7 +1562,7 @@ static void ast_post_chip_2300(struct drm_device *dev)
u32 temp;
u8 reg;
reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
if ((reg & 0x80) == 0) {/* vga only */
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
@ -1634,7 +1634,7 @@ static void ast_post_chip_2300(struct drm_device *dev)
/* wait ready */
do {
reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
} while ((reg & 0x40) == 0);
}
@ -2027,7 +2027,7 @@ void ast_post_chip_2500(struct drm_device *dev)
u32 temp;
u8 reg;
reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
if ((reg & AST_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */
/* Clear bus lock condition */
ast_patch_ahb_2500(ast);
@ -2075,6 +2075,6 @@ void ast_post_chip_2500(struct drm_device *dev)
/* wait ready */
do {
reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
} while ((reg & 0x40) == 0);
}

View File

@ -0,0 +1,99 @@
/* SPDX-License-Identifier: MIT */
#ifndef __AST_REG_H__
#define __AST_REG_H__
#include <linux/bits.h>
/*
* Modesetting
*/
#define AST_IO_MM_OFFSET (0x380)
#define AST_IO_VGAARI_W (0x40)
#define AST_IO_VGAMR_W (0x42)
#define AST_IO_VGAER (0x43)
#define AST_IO_VGASRI (0x44)
#define AST_IO_VGADRR (0x47)
#define AST_IO_VGADWR (0x48)
#define AST_IO_VGAPDR (0x49)
#define AST_IO_VGAGRI (0x4E)
#define AST_IO_VGACRI (0x54)
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
#define AST_IO_VGAIR1_R (0x5A)
#define AST_IO_VGAIR1_VREFRESH BIT(3)
#define AST_IO_VGAMR_R (0x4C)
/*
* Display Transmitter Type
*/
#define TX_TYPE_MASK GENMASK(3, 1)
#define NO_TX (0 << 1)
#define ITE66121_VBIOS_TX (1 << 1)
#define SI164_VBIOS_TX (2 << 1)
#define CH7003_VBIOS_TX (3 << 1)
#define DP501_VBIOS_TX (4 << 1)
#define ANX9807_VBIOS_TX (5 << 1)
#define TX_FW_EMBEDDED_FW_TX (6 << 1)
#define ASTDP_DPMCU_TX (7 << 1)
#define AST_VRAM_INIT_STATUS_MASK GENMASK(7, 6)
//#define AST_VRAM_INIT_BY_BMC BIT(7)
//#define AST_VRAM_INIT_READY BIT(6)
/*
* AST DisplayPort
*/
/* Defines for SoC scratch registers used by ASTDP */
#define AST_DP_PHY_SLEEP BIT(4)
#define AST_DP_VIDEO_ENABLE BIT(0)
/*
* CRD1[b5]: DP MCU FW is executing
* CRDC[b0]: DP link success
* CRDF[b0]: DP HPD
* CRE5[b0]: Host reading EDID process is done
*/
#define ASTDP_MCU_FW_EXECUTING BIT(5)
#define ASTDP_LINK_SUCCESS BIT(0)
#define ASTDP_HPD BIT(0)
#define ASTDP_HOST_EDID_READ_DONE BIT(0)
#define ASTDP_HOST_EDID_READ_DONE_MASK GENMASK(0, 0)
/*
* CRB8[b1]: Enable VSYNC off
* CRB8[b0]: Enable HSYNC off
*/
#define AST_DPMS_VSYNC_OFF BIT(1)
#define AST_DPMS_HSYNC_OFF BIT(0)
/*
* CRDF[b4]: Mirror of AST_DP_VIDEO_ENABLE
* Precondition: A. ~AST_DP_PHY_SLEEP &&
* B. DP_HPD &&
* C. DP_LINK_SUCCESS
*/
#define ASTDP_MIRROR_VIDEO_ENABLE BIT(4)
#define ASTDP_EDID_READ_POINTER_MASK GENMASK(7, 0)
#define ASTDP_EDID_VALID_FLAG_MASK GENMASK(0, 0)
#define ASTDP_EDID_READ_DATA_MASK GENMASK(7, 0)
/*
* ASTDP setmode registers:
* CRE0[7:0]: MISC0 (0x00: 18-bpp or 0x20: 24-bpp)
* CRE1[7:0]: MISC1 (default: 0x00)
* CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50)
*/
#define ASTDP_MISC0_24bpp BIT(5)
#define ASTDP_MISC1 0
#define ASTDP_AND_CLEAR_MASK 0x00
#endif

View File

@ -333,6 +333,18 @@ enum adv7511_type {
#define ADV7511_MAX_ADDRS 3
struct adv7511_chip_info {
enum adv7511_type type;
unsigned int max_mode_clock_khz;
unsigned int max_lane_freq_khz;
const char * const *supply_names;
unsigned int num_supplies;
unsigned int reg_cec_offset;
bool has_dsi;
bool link_config;
bool hpd_override_enable;
};
struct adv7511 {
struct i2c_client *i2c_main;
struct i2c_client *i2c_edid;
@ -341,7 +353,6 @@ struct adv7511 {
struct regmap *regmap;
struct regmap *regmap_cec;
unsigned int reg_cec_offset;
enum drm_connector_status status;
bool powered;
@ -369,7 +380,6 @@ struct adv7511 {
struct gpio_desc *gpio_pd;
struct regulator_bulk_data *supplies;
unsigned int num_supplies;
/* ADV7533 DSI RX related params */
struct device_node *host_node;
@ -377,7 +387,7 @@ struct adv7511 {
u8 num_dsi_lanes;
bool use_timing_gen;
enum adv7511_type type;
const struct adv7511_chip_info *info;
struct platform_device *audio_pdev;
struct cec_adapter *cec_adap;

View File

@ -33,7 +33,7 @@ static const u8 ADV7511_REG_CEC_RX_FRAME_LEN[] = {
static void adv_cec_tx_raw_status(struct adv7511 *adv7511, u8 tx_raw_status)
{
unsigned int offset = adv7511->reg_cec_offset;
unsigned int offset = adv7511->info->reg_cec_offset;
unsigned int val;
if (regmap_read(adv7511->regmap_cec,
@ -84,7 +84,7 @@ static void adv_cec_tx_raw_status(struct adv7511 *adv7511, u8 tx_raw_status)
static void adv7511_cec_rx(struct adv7511 *adv7511, int rx_buf)
{
unsigned int offset = adv7511->reg_cec_offset;
unsigned int offset = adv7511->info->reg_cec_offset;
struct cec_msg msg = {};
unsigned int len;
unsigned int val;
@ -121,7 +121,7 @@ static void adv7511_cec_rx(struct adv7511 *adv7511, int rx_buf)
void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
{
unsigned int offset = adv7511->reg_cec_offset;
unsigned int offset = adv7511->info->reg_cec_offset;
const u32 irq_tx_mask = ADV7511_INT1_CEC_TX_READY |
ADV7511_INT1_CEC_TX_ARBIT_LOST |
ADV7511_INT1_CEC_TX_RETRY_TIMEOUT;
@ -177,7 +177,7 @@ void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
struct adv7511 *adv7511 = cec_get_drvdata(adap);
unsigned int offset = adv7511->reg_cec_offset;
unsigned int offset = adv7511->info->reg_cec_offset;
if (adv7511->i2c_cec == NULL)
return -EIO;
@ -223,7 +223,7 @@ static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
{
struct adv7511 *adv7511 = cec_get_drvdata(adap);
unsigned int offset = adv7511->reg_cec_offset;
unsigned int offset = adv7511->info->reg_cec_offset;
unsigned int i, free_idx = ADV7511_MAX_ADDRS;
if (!adv7511->cec_enabled_adap)
@ -292,7 +292,7 @@ static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct adv7511 *adv7511 = cec_get_drvdata(adap);
unsigned int offset = adv7511->reg_cec_offset;
unsigned int offset = adv7511->info->reg_cec_offset;
u8 len = msg->len;
unsigned int i;
@ -345,7 +345,7 @@ static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
{
unsigned int offset = adv7511->reg_cec_offset;
unsigned int offset = adv7511->info->reg_cec_offset;
int ret = adv7511_cec_parse_dt(dev, adv7511);
if (ret)

View File

@ -354,7 +354,7 @@ static void __adv7511_power_on(struct adv7511 *adv7511)
* first few seconds after enabling the output. On the other hand
* adv7535 requires enabling the HPD Override bit for proper HPD.
*/
if (adv7511->type == ADV7535)
if (adv7511->info->hpd_override_enable)
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
ADV7535_REG_POWER2_HPD_OVERRIDE,
ADV7535_REG_POWER2_HPD_OVERRIDE);
@ -373,7 +373,7 @@ static void adv7511_power_on(struct adv7511 *adv7511)
*/
regcache_sync(adv7511->regmap);
if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
if (adv7511->info->has_dsi)
adv7533_dsi_power_on(adv7511);
adv7511->powered = true;
}
@ -381,7 +381,7 @@ static void adv7511_power_on(struct adv7511 *adv7511)
static void __adv7511_power_off(struct adv7511 *adv7511)
{
/* TODO: setup additional power down modes */
if (adv7511->type == ADV7535)
if (adv7511->info->hpd_override_enable)
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
ADV7535_REG_POWER2_HPD_OVERRIDE, 0);
@ -397,7 +397,7 @@ static void __adv7511_power_off(struct adv7511 *adv7511)
static void adv7511_power_off(struct adv7511 *adv7511)
{
__adv7511_power_off(adv7511);
if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
if (adv7511->info->has_dsi)
adv7533_dsi_power_off(adv7511);
adv7511->powered = false;
}
@ -682,7 +682,7 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
status = connector_status_disconnected;
} else {
/* Re-enable HPD sensing */
if (adv7511->type == ADV7535)
if (adv7511->info->hpd_override_enable)
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
ADV7535_REG_POWER2_HPD_OVERRIDE,
ADV7535_REG_POWER2_HPD_OVERRIDE);
@ -786,7 +786,7 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
else
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
if (adv7511->type == ADV7511)
if (adv7511->info->type == ADV7511)
regmap_update_bits(adv7511->regmap, 0xfb,
0x6, low_refresh_rate << 1);
else
@ -921,7 +921,7 @@ static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
if (adv->type == ADV7533 || adv->type == ADV7535)
if (adv->info->has_dsi)
return adv7533_mode_valid(adv, mode);
else
return adv7511_mode_valid(adv, mode);
@ -1004,37 +1004,30 @@ static const char * const adv7533_supply_names[] = {
static int adv7511_init_regulators(struct adv7511 *adv)
{
const char * const *supply_names = adv->info->supply_names;
unsigned int num_supplies = adv->info->num_supplies;
struct device *dev = &adv->i2c_main->dev;
const char * const *supply_names;
unsigned int i;
int ret;
if (adv->type == ADV7511) {
supply_names = adv7511_supply_names;
adv->num_supplies = ARRAY_SIZE(adv7511_supply_names);
} else {
supply_names = adv7533_supply_names;
adv->num_supplies = ARRAY_SIZE(adv7533_supply_names);
}
adv->supplies = devm_kcalloc(dev, adv->num_supplies,
adv->supplies = devm_kcalloc(dev, num_supplies,
sizeof(*adv->supplies), GFP_KERNEL);
if (!adv->supplies)
return -ENOMEM;
for (i = 0; i < adv->num_supplies; i++)
for (i = 0; i < num_supplies; i++)
adv->supplies[i].supply = supply_names[i];
ret = devm_regulator_bulk_get(dev, adv->num_supplies, adv->supplies);
ret = devm_regulator_bulk_get(dev, num_supplies, adv->supplies);
if (ret)
return ret;
return regulator_bulk_enable(adv->num_supplies, adv->supplies);
return regulator_bulk_enable(num_supplies, adv->supplies);
}
static void adv7511_uninit_regulators(struct adv7511 *adv)
{
regulator_bulk_disable(adv->num_supplies, adv->supplies);
regulator_bulk_disable(adv->info->num_supplies, adv->supplies);
}
static bool adv7511_cec_register_volatile(struct device *dev, unsigned int reg)
@ -1042,7 +1035,7 @@ static bool adv7511_cec_register_volatile(struct device *dev, unsigned int reg)
struct i2c_client *i2c = to_i2c_client(dev);
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
reg -= adv7511->reg_cec_offset;
reg -= adv7511->info->reg_cec_offset;
switch (reg) {
case ADV7511_REG_CEC_RX1_FRAME_HDR:
@ -1093,12 +1086,10 @@ static int adv7511_init_cec_regmap(struct adv7511 *adv)
goto err;
}
if (adv->type == ADV7533 || adv->type == ADV7535) {
if (adv->info->reg_cec_offset == ADV7533_REG_CEC_OFFSET) {
ret = adv7533_patch_cec_registers(adv);
if (ret)
goto err;
adv->reg_cec_offset = ADV7533_REG_CEC_OFFSET;
}
return 0;
@ -1192,7 +1183,6 @@ static int adv7511_parse_dt(struct device_node *np,
static int adv7511_probe(struct i2c_client *i2c)
{
const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
struct adv7511_link_config link_config;
struct adv7511 *adv7511;
struct device *dev = &i2c->dev;
@ -1209,15 +1199,11 @@ static int adv7511_probe(struct i2c_client *i2c)
adv7511->i2c_main = i2c;
adv7511->powered = false;
adv7511->status = connector_status_disconnected;
if (dev->of_node)
adv7511->type = (enum adv7511_type)of_device_get_match_data(dev);
else
adv7511->type = id->driver_data;
adv7511->info = i2c_get_match_data(i2c);
memset(&link_config, 0, sizeof(link_config));
if (adv7511->type == ADV7511)
if (adv7511->info->link_config)
ret = adv7511_parse_dt(dev->of_node, &link_config);
else
ret = adv7533_parse_dt(dev->of_node, adv7511);
@ -1254,7 +1240,7 @@ static int adv7511_probe(struct i2c_client *i2c)
goto uninit_regulators;
dev_dbg(dev, "Rev. %d\n", val);
if (adv7511->type == ADV7511)
if (adv7511->info->type == ADV7511)
ret = regmap_register_patch(adv7511->regmap,
adv7511_fixed_registers,
ARRAY_SIZE(adv7511_fixed_registers));
@ -1306,7 +1292,7 @@ static int adv7511_probe(struct i2c_client *i2c)
i2c_set_clientdata(i2c, adv7511);
if (adv7511->type == ADV7511)
if (adv7511->info->link_config)
adv7511_set_link_config(adv7511, &link_config);
ret = adv7511_cec_init(dev, adv7511);
@ -1325,7 +1311,7 @@ static int adv7511_probe(struct i2c_client *i2c)
adv7511_audio_init(dev, adv7511);
if (adv7511->type == ADV7533 || adv7511->type == ADV7535) {
if (adv7511->info->has_dsi) {
ret = adv7533_attach_dsi(adv7511);
if (ret)
goto err_unregister_audio;
@ -1368,22 +1354,50 @@ static void adv7511_remove(struct i2c_client *i2c)
i2c_unregister_device(adv7511->i2c_edid);
}
static const struct adv7511_chip_info adv7511_chip_info = {
.type = ADV7511,
.supply_names = adv7511_supply_names,
.num_supplies = ARRAY_SIZE(adv7511_supply_names),
.link_config = true,
};
static const struct adv7511_chip_info adv7533_chip_info = {
.type = ADV7533,
.max_mode_clock_khz = 80000,
.max_lane_freq_khz = 800000,
.supply_names = adv7533_supply_names,
.num_supplies = ARRAY_SIZE(adv7533_supply_names),
.reg_cec_offset = ADV7533_REG_CEC_OFFSET,
.has_dsi = true,
};
static const struct adv7511_chip_info adv7535_chip_info = {
.type = ADV7535,
.max_mode_clock_khz = 148500,
.max_lane_freq_khz = 891000,
.supply_names = adv7533_supply_names,
.num_supplies = ARRAY_SIZE(adv7533_supply_names),
.reg_cec_offset = ADV7533_REG_CEC_OFFSET,
.has_dsi = true,
.hpd_override_enable = true,
};
static const struct i2c_device_id adv7511_i2c_ids[] = {
{ "adv7511", ADV7511 },
{ "adv7511w", ADV7511 },
{ "adv7513", ADV7511 },
{ "adv7533", ADV7533 },
{ "adv7535", ADV7535 },
{ "adv7511", (kernel_ulong_t)&adv7511_chip_info },
{ "adv7511w", (kernel_ulong_t)&adv7511_chip_info },
{ "adv7513", (kernel_ulong_t)&adv7511_chip_info },
{ "adv7533", (kernel_ulong_t)&adv7533_chip_info },
{ "adv7535", (kernel_ulong_t)&adv7535_chip_info },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
static const struct of_device_id adv7511_of_ids[] = {
{ .compatible = "adi,adv7511", .data = (void *)ADV7511 },
{ .compatible = "adi,adv7511w", .data = (void *)ADV7511 },
{ .compatible = "adi,adv7513", .data = (void *)ADV7511 },
{ .compatible = "adi,adv7533", .data = (void *)ADV7533 },
{ .compatible = "adi,adv7535", .data = (void *)ADV7535 },
{ .compatible = "adi,adv7511", .data = &adv7511_chip_info },
{ .compatible = "adi,adv7511w", .data = &adv7511_chip_info },
{ .compatible = "adi,adv7513", .data = &adv7511_chip_info },
{ .compatible = "adi,adv7533", .data = &adv7533_chip_info },
{ .compatible = "adi,adv7535", .data = &adv7535_chip_info },
{ }
};
MODULE_DEVICE_TABLE(of, adv7511_of_ids);
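The refactoring above moves the per-variant differences (supply list, CEC register offset, DSI support, HPD override, mode and lane clock limits) out of scattered adv->type comparisons and into a const struct adv7511_chip_info table that probe fetches through i2c_get_match_data(); the i2c_device_id and of_device_id tables then simply point at the right info structure. Below is a small stand-alone, user-space sketch of the same data-driven pattern. The variant values and the get_match_data() helper are illustrative assumptions, not the driver or kernel API.

/*
 * Stand-alone model of the per-variant info-table pattern used above.
 * The chip data and the lookup helper are assumptions for illustration.
 */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct chip_info {
        const char *name;
        unsigned int max_mode_clock_khz;
        bool has_dsi;
        bool hpd_override_enable;
};

static const struct chip_info chips[] = {
        { "adv7511", 0,      false, false },
        { "adv7533", 80000,  true,  false },
        { "adv7535", 148500, true,  true  },
};

/* Plays the role of i2c_get_match_data(): map a compatible to its info. */
static const struct chip_info *get_match_data(const char *compatible)
{
        for (size_t i = 0; i < sizeof(chips) / sizeof(chips[0]); i++)
                if (!strcmp(chips[i].name, compatible))
                        return &chips[i];
        return NULL;
}

int main(void)
{
        const struct chip_info *info = get_match_data("adv7535");

        /* Behavioural differences become data lookups, not type checks. */
        if (info && info->has_dsi)
                printf("%s: DSI path, max mode clock %u kHz\n",
                       info->name, info->max_mode_clock_khz);
        return 0;
}

The practical benefit shown in the hunks above is that adding a new variant becomes a one-struct addition instead of touching every type comparison in the driver.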

View File

@ -103,18 +103,15 @@ void adv7533_dsi_power_off(struct adv7511 *adv)
enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
const struct drm_display_mode *mode)
{
unsigned long max_lane_freq;
struct mipi_dsi_device *dsi = adv->dsi;
u8 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
/* Check max clock for either 7533 or 7535 */
if (mode->clock > (adv->type == ADV7533 ? 80000 : 148500))
if (mode->clock > adv->info->max_mode_clock_khz)
return MODE_CLOCK_HIGH;
/* Check max clock for each lane */
max_lane_freq = (adv->type == ADV7533 ? 800000 : 891000);
if (mode->clock * bpp > max_lane_freq * adv->num_dsi_lanes)
if (mode->clock * bpp > adv->info->max_lane_freq_khz * adv->num_dsi_lanes)
return MODE_CLOCK_HIGH;
return MODE_OK;

View File

@ -49,4 +49,15 @@ config DRM_IMX8QXP_PIXEL_LINK_TO_DPI
Choose this to enable pixel link to display pixel interface (PXL2DPI)
found in Freescale i.MX8qxp processor.
config DRM_IMX93_MIPI_DSI
tristate "Freescale i.MX93 specific extensions for Synopsys DW MIPI DSI"
depends on OF
depends on COMMON_CLK
select DRM_DW_MIPI_DSI
select GENERIC_PHY
select GENERIC_PHY_MIPI_DPHY
help
Choose this to enable MIPI DSI controller found in Freescale i.MX93
processor.
endif # ARCH_MXC || COMPILE_TEST

View File

@ -4,3 +4,4 @@ obj-$(CONFIG_DRM_IMX8QXP_LDB) += imx8qxp-ldb.o
obj-$(CONFIG_DRM_IMX8QXP_PIXEL_COMBINER) += imx8qxp-pixel-combiner.o
obj-$(CONFIG_DRM_IMX8QXP_PIXEL_LINK) += imx8qxp-pixel-link.o
obj-$(CONFIG_DRM_IMX8QXP_PIXEL_LINK_TO_DPI) += imx8qxp-pxl2dpi.o
obj-$(CONFIG_DRM_IMX93_MIPI_DSI) += imx93-mipi-dsi.o

View File

@ -0,0 +1,917 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2022,2023 NXP
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/math.h>
#include <linux/media-bus-format.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <drm/bridge/dw_mipi_dsi.h>
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
/* DPHY PLL configuration registers */
#define DSI_REG 0x4c
#define CFGCLKFREQRANGE_MASK GENMASK(5, 0)
#define CFGCLKFREQRANGE(x) FIELD_PREP(CFGCLKFREQRANGE_MASK, (x))
#define CLKSEL_MASK GENMASK(7, 6)
#define CLKSEL_STOP FIELD_PREP(CLKSEL_MASK, 0)
#define CLKSEL_GEN FIELD_PREP(CLKSEL_MASK, 1)
#define CLKSEL_EXT FIELD_PREP(CLKSEL_MASK, 2)
#define HSFREQRANGE_MASK GENMASK(14, 8)
#define HSFREQRANGE(x) FIELD_PREP(HSFREQRANGE_MASK, (x))
#define UPDATE_PLL BIT(17)
#define SHADOW_CLR BIT(18)
#define CLK_EXT BIT(19)
#define DSI_WRITE_REG0 0x50
#define M_MASK GENMASK(9, 0)
#define M(x) FIELD_PREP(M_MASK, ((x) - 2))
#define N_MASK GENMASK(13, 10)
#define N(x) FIELD_PREP(N_MASK, ((x) - 1))
#define VCO_CTRL_MASK GENMASK(19, 14)
#define VCO_CTRL(x) FIELD_PREP(VCO_CTRL_MASK, (x))
#define PROP_CTRL_MASK GENMASK(25, 20)
#define PROP_CTRL(x) FIELD_PREP(PROP_CTRL_MASK, (x))
#define INT_CTRL_MASK GENMASK(31, 26)
#define INT_CTRL(x) FIELD_PREP(INT_CTRL_MASK, (x))
#define DSI_WRITE_REG1 0x54
#define GMP_CTRL_MASK GENMASK(1, 0)
#define GMP_CTRL(x) FIELD_PREP(GMP_CTRL_MASK, (x))
#define CPBIAS_CTRL_MASK GENMASK(8, 2)
#define CPBIAS_CTRL(x) FIELD_PREP(CPBIAS_CTRL_MASK, (x))
#define PLL_SHADOW_CTRL BIT(9)
/* display mux control register */
#define DISPLAY_MUX 0x60
#define MIPI_DSI_RGB666_MAP_CFG GENMASK(7, 6)
#define RGB666_CONFIG1 FIELD_PREP(MIPI_DSI_RGB666_MAP_CFG, 0)
#define RGB666_CONFIG2 FIELD_PREP(MIPI_DSI_RGB666_MAP_CFG, 1)
#define MIPI_DSI_RGB565_MAP_CFG GENMASK(5, 4)
#define RGB565_CONFIG1 FIELD_PREP(MIPI_DSI_RGB565_MAP_CFG, 0)
#define RGB565_CONFIG2 FIELD_PREP(MIPI_DSI_RGB565_MAP_CFG, 1)
#define RGB565_CONFIG3 FIELD_PREP(MIPI_DSI_RGB565_MAP_CFG, 2)
#define LCDIF_CROSS_LINE_PATTERN GENMASK(3, 0)
#define RGB888_TO_RGB888 FIELD_PREP(LCDIF_CROSS_LINE_PATTERN, 0)
#define RGB888_TO_RGB666 FIELD_PREP(LCDIF_CROSS_LINE_PATTERN, 6)
#define RGB565_TO_RGB565 FIELD_PREP(LCDIF_CROSS_LINE_PATTERN, 7)
#define MHZ(x) ((x) * 1000000UL)
#define REF_CLK_RATE_MAX MHZ(64)
#define REF_CLK_RATE_MIN MHZ(2)
#define FOUT_MAX MHZ(1250)
#define FOUT_MIN MHZ(40)
#define FVCO_DIV_FACTOR MHZ(80)
#define MBPS(x) ((x) * 1000000UL)
#define DATA_RATE_MAX_SPEED MBPS(2500)
#define DATA_RATE_MIN_SPEED MBPS(80)
#define M_MAX 625UL
#define M_MIN 64UL
#define N_MAX 16U
#define N_MIN 1U
struct imx93_dsi {
struct device *dev;
struct regmap *regmap;
struct clk *clk_pixel;
struct clk *clk_ref;
struct clk *clk_cfg;
struct dw_mipi_dsi *dmd;
struct dw_mipi_dsi_plat_data pdata;
union phy_configure_opts phy_cfg;
unsigned long ref_clk_rate;
u32 format;
};
struct dphy_pll_cfg {
u32 m; /* PLL Feedback Multiplication Ratio */
u32 n; /* PLL Input Frequency Division Ratio */
};
struct dphy_pll_vco_prop {
unsigned long max_fout;
u8 vco_cntl;
u8 prop_cntl;
};
struct dphy_pll_hsfreqrange {
unsigned long max_mbps;
u8 hsfreqrange;
};
/* DPHY Databook Table 3-13 Charge-pump Programmability */
static const struct dphy_pll_vco_prop vco_prop_map[] = {
{ 55, 0x3f, 0x0d },
{ 82, 0x37, 0x0d },
{ 110, 0x2f, 0x0d },
{ 165, 0x27, 0x0d },
{ 220, 0x1f, 0x0d },
{ 330, 0x17, 0x0d },
{ 440, 0x0f, 0x0d },
{ 660, 0x07, 0x0d },
{ 1149, 0x03, 0x0d },
{ 1152, 0x01, 0x0d },
{ 1250, 0x01, 0x0e },
};
/* DPHY Databook Table 5-7 Frequency Ranges and Defaults */
static const struct dphy_pll_hsfreqrange hsfreqrange_map[] = {
{ 89, 0x00 },
{ 99, 0x10 },
{ 109, 0x20 },
{ 119, 0x30 },
{ 129, 0x01 },
{ 139, 0x11 },
{ 149, 0x21 },
{ 159, 0x31 },
{ 169, 0x02 },
{ 179, 0x12 },
{ 189, 0x22 },
{ 204, 0x32 },
{ 219, 0x03 },
{ 234, 0x13 },
{ 249, 0x23 },
{ 274, 0x33 },
{ 299, 0x04 },
{ 324, 0x14 },
{ 349, 0x25 },
{ 399, 0x35 },
{ 449, 0x05 },
{ 499, 0x16 },
{ 549, 0x26 },
{ 599, 0x37 },
{ 649, 0x07 },
{ 699, 0x18 },
{ 749, 0x28 },
{ 799, 0x39 },
{ 849, 0x09 },
{ 899, 0x19 },
{ 949, 0x29 },
{ 999, 0x3a },
{ 1049, 0x0a },
{ 1099, 0x1a },
{ 1149, 0x2a },
{ 1199, 0x3b },
{ 1249, 0x0b },
{ 1299, 0x1b },
{ 1349, 0x2b },
{ 1399, 0x3c },
{ 1449, 0x0c },
{ 1499, 0x1c },
{ 1549, 0x2c },
{ 1599, 0x3d },
{ 1649, 0x0d },
{ 1699, 0x1d },
{ 1749, 0x2e },
{ 1799, 0x3e },
{ 1849, 0x0e },
{ 1899, 0x1e },
{ 1949, 0x2f },
{ 1999, 0x3f },
{ 2049, 0x0f },
{ 2099, 0x40 },
{ 2149, 0x41 },
{ 2199, 0x42 },
{ 2249, 0x43 },
{ 2299, 0x44 },
{ 2349, 0x45 },
{ 2399, 0x46 },
{ 2449, 0x47 },
{ 2499, 0x48 },
{ 2500, 0x49 },
};
static void dphy_pll_write(struct imx93_dsi *dsi, unsigned int reg, u32 value)
{
int ret;
ret = regmap_write(dsi->regmap, reg, value);
if (ret < 0)
dev_err(dsi->dev, "failed to write 0x%08x to pll reg 0x%x: %d\n",
value, reg, ret);
}
static inline unsigned long data_rate_to_fout(unsigned long data_rate)
{
/* Fout is half of data rate */
return data_rate / 2;
}
static int
dphy_pll_get_configure_from_opts(struct imx93_dsi *dsi,
struct phy_configure_opts_mipi_dphy *dphy_opts,
struct dphy_pll_cfg *cfg)
{
struct device *dev = dsi->dev;
unsigned long fin = dsi->ref_clk_rate;
unsigned long fout;
unsigned long best_fout = 0;
unsigned int fvco_div;
unsigned int min_n, max_n, n, best_n;
unsigned long m, best_m;
unsigned long min_delta = ULONG_MAX;
unsigned long delta;
u64 tmp;
if (dphy_opts->hs_clk_rate < DATA_RATE_MIN_SPEED ||
dphy_opts->hs_clk_rate > DATA_RATE_MAX_SPEED) {
dev_dbg(dev, "invalid data rate per lane: %lu\n",
dphy_opts->hs_clk_rate);
return -EINVAL;
}
fout = data_rate_to_fout(dphy_opts->hs_clk_rate);
/* DPHY Databook 3.3.6.1 Output Frequency */
/* Fout = Fvco / Fvco_div = (Fin * M) / (Fvco_div * N) */
/* Fvco_div could be 1/2/4/8 according to Fout range. */
fvco_div = 8UL / min(DIV_ROUND_UP(fout, FVCO_DIV_FACTOR), 8UL);
/* limitation: 2MHz <= Fin / N <= 8MHz */
min_n = DIV_ROUND_UP_ULL((u64)fin, MHZ(8));
max_n = DIV_ROUND_DOWN_ULL((u64)fin, MHZ(2));
/* clamp possible N(s) */
min_n = clamp(min_n, N_MIN, N_MAX);
max_n = clamp(max_n, N_MIN, N_MAX);
dev_dbg(dev, "Fout = %lu, Fvco_div = %u, n_range = [%u, %u]\n",
fout, fvco_div, min_n, max_n);
for (n = min_n; n <= max_n; n++) {
/* M = (Fout * N * Fvco_div) / Fin */
m = DIV_ROUND_CLOSEST(fout * n * fvco_div, fin);
/* check M range */
if (m < M_MIN || m > M_MAX)
continue;
/* calculate temporary Fout */
tmp = m * fin;
do_div(tmp, n * fvco_div);
if (tmp < FOUT_MIN || tmp > FOUT_MAX)
continue;
delta = abs(fout - tmp);
if (delta < min_delta) {
best_n = n;
best_m = m;
min_delta = delta;
best_fout = tmp;
}
}
if (best_fout) {
cfg->m = best_m;
cfg->n = best_n;
dev_dbg(dev, "best Fout = %lu, m = %u, n = %u\n",
best_fout, cfg->m, cfg->n);
} else {
dev_dbg(dev, "failed to find best Fout\n");
return -EINVAL;
}
return 0;
}
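The search above iterates over the legal input dividers N, derives the closest feedback multiplier M from Fout = (Fin * M) / (Fvco_div * N), and keeps the (M, N) pair with the smallest error against the requested Fout, which is half the per-lane data rate. A stand-alone sketch of the same arithmetic follows; the 24 MHz reference clock and the 1 Gbps-per-lane target are assumptions chosen only to make the numbers concrete.

/*
 * Stand-alone sketch of the M/N search above, assuming a 24 MHz phy_ref
 * clock and a 1 Gbps-per-lane target, i.e. Fout = 500 MHz. The constants
 * mirror the driver; the rounding helpers are plain C stand-ins.
 */
#include <stdio.h>

#define MHZ(x) ((x) * 1000000ULL)

int main(void)
{
        unsigned long long fin = MHZ(24);        /* assumed reference clock */
        unsigned long long fout = MHZ(1000) / 2; /* Fout is half the lane rate */
        unsigned long long fvco_div, best_m = 0, best_fout = 0;
        unsigned long long delta, min_delta = ~0ULL;
        unsigned int n, min_n, max_n, best_n = 0;

        /* Fvco_div is 1, 2, 4 or 8 depending on the Fout range (80 MHz steps). */
        fvco_div = (fout + MHZ(80) - 1) / MHZ(80);
        if (fvco_div > 8)
                fvco_div = 8;
        fvco_div = 8 / fvco_div;

        /* Limitation: 2 MHz <= Fin / N <= 8 MHz, with N in [1, 16]. */
        min_n = (fin + MHZ(8) - 1) / MHZ(8);
        max_n = fin / MHZ(2);
        if (min_n < 1)
                min_n = 1;
        if (max_n > 16)
                max_n = 16;

        for (n = min_n; n <= max_n; n++) {
                /* M = round((Fout * N * Fvco_div) / Fin), must stay in [64, 625]. */
                unsigned long long m = (fout * n * fvco_div + fin / 2) / fin;
                unsigned long long tmp;

                if (m < 64 || m > 625)
                        continue;
                tmp = m * fin / (n * fvco_div);
                delta = tmp > fout ? tmp - fout : fout - tmp;
                if (delta < min_delta) {
                        best_n = n;
                        best_m = m;
                        best_fout = tmp;
                        min_delta = delta;
                }
        }

        /* With these assumptions this prints: M=125 N=6 Fout=500000000 Hz */
        printf("M=%llu N=%u Fout=%llu Hz\n", best_m, best_n, best_fout);
        return 0;
}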
static void dphy_pll_clear_shadow(struct imx93_dsi *dsi)
{
/* Reference DPHY Databook Figure 3-3 Initialization Timing Diagram. */
/* Select clock generation first. */
dphy_pll_write(dsi, DSI_REG, CLKSEL_GEN);
/* Clear the shadow a short while after clock selection is done. */
fsleep(1);
dphy_pll_write(dsi, DSI_REG, CLKSEL_GEN | SHADOW_CLR);
/* A minimum pulse of 5ns on shadow_clear signal. */
fsleep(1);
dphy_pll_write(dsi, DSI_REG, CLKSEL_GEN);
}
static unsigned long dphy_pll_get_cfgclkrange(struct imx93_dsi *dsi)
{
/*
* DPHY Databook Table 4-4 System Control Signals mentions an equation
* for cfgclkfreqrange[5:0].
*/
return (clk_get_rate(dsi->clk_cfg) / MHZ(1) - 17) * 4;
}
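As a worked example of the equation above, and assuming a 24 MHz phy_cfg clock purely for illustration: cfgclkfreqrange[5:0] = (24 - 17) * 4 = 28 (0x1c).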
static u8
dphy_pll_get_hsfreqrange(struct phy_configure_opts_mipi_dphy *dphy_opts)
{
unsigned long mbps = dphy_opts->hs_clk_rate / MHZ(1);
int i;
for (i = 0; i < ARRAY_SIZE(hsfreqrange_map); i++)
if (mbps <= hsfreqrange_map[i].max_mbps)
return hsfreqrange_map[i].hsfreqrange;
return 0;
}
static u8 dphy_pll_get_vco(struct phy_configure_opts_mipi_dphy *dphy_opts)
{
unsigned long fout = data_rate_to_fout(dphy_opts->hs_clk_rate) / MHZ(1);
int i;
for (i = 0; i < ARRAY_SIZE(vco_prop_map); i++)
if (fout <= vco_prop_map[i].max_fout)
return vco_prop_map[i].vco_cntl;
return 0;
}
static u8 dphy_pll_get_prop(struct phy_configure_opts_mipi_dphy *dphy_opts)
{
unsigned long fout = data_rate_to_fout(dphy_opts->hs_clk_rate) / MHZ(1);
int i;
for (i = 0; i < ARRAY_SIZE(vco_prop_map); i++)
if (fout <= vco_prop_map[i].max_fout)
return vco_prop_map[i].prop_cntl;
return 0;
}
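Continuing the assumed 1 Gbps-per-lane example from the PLL sketch above, the helpers here are first-match table scans: 1000 Mbps lands on the { 1049, 0x0a } hsfreqrange row, and Fout = 500 MHz lands on the { 660, 0x07, 0x0d } row of vco_prop_map, giving vco_cntl 0x07 and prop_cntl 0x0d.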
static int dphy_pll_update(struct imx93_dsi *dsi)
{
int ret;
ret = regmap_update_bits(dsi->regmap, DSI_REG, UPDATE_PLL, UPDATE_PLL);
if (ret < 0) {
dev_err(dsi->dev, "failed to set UPDATE_PLL: %d\n", ret);
return ret;
}
/*
* The updatepll signal should be asserted for a minimum of four clkin
* cycles, according to DPHY Databook Figure 3-3 Initialization Timing
* Diagram.
*/
fsleep(10);
ret = regmap_update_bits(dsi->regmap, DSI_REG, UPDATE_PLL, 0);
if (ret < 0) {
dev_err(dsi->dev, "failed to clear UPDATE_PLL: %d\n", ret);
return ret;
}
return 0;
}
static int dphy_pll_configure(struct imx93_dsi *dsi, union phy_configure_opts *opts)
{
struct dphy_pll_cfg cfg = { 0 };
u32 val;
int ret;
ret = dphy_pll_get_configure_from_opts(dsi, &opts->mipi_dphy, &cfg);
if (ret) {
dev_err(dsi->dev, "failed to get phy pll cfg %d\n", ret);
return ret;
}
dphy_pll_clear_shadow(dsi);
/* DSI_REG */
val = CLKSEL_GEN |
CFGCLKFREQRANGE(dphy_pll_get_cfgclkrange(dsi)) |
HSFREQRANGE(dphy_pll_get_hsfreqrange(&opts->mipi_dphy));
dphy_pll_write(dsi, DSI_REG, val);
/* DSI_WRITE_REG0 */
val = M(cfg.m) | N(cfg.n) | INT_CTRL(0) |
VCO_CTRL(dphy_pll_get_vco(&opts->mipi_dphy)) |
PROP_CTRL(dphy_pll_get_prop(&opts->mipi_dphy));
dphy_pll_write(dsi, DSI_WRITE_REG0, val);
/* DSI_WRITE_REG1 */
dphy_pll_write(dsi, DSI_WRITE_REG1, GMP_CTRL(1) | CPBIAS_CTRL(0x10));
ret = clk_prepare_enable(dsi->clk_ref);
if (ret < 0) {
dev_err(dsi->dev, "failed to enable ref clock: %d\n", ret);
return ret;
}
/*
* At least 10 refclk cycles are required before updatePLL assertion,
* according to DPHY Databook Figure 3-3 Initialization Timing Diagram.
*/
fsleep(10);
ret = dphy_pll_update(dsi);
if (ret < 0) {
clk_disable_unprepare(dsi->clk_ref);
return ret;
}
return 0;
}
static void dphy_pll_clear_reg(struct imx93_dsi *dsi)
{
dphy_pll_write(dsi, DSI_REG, 0);
dphy_pll_write(dsi, DSI_WRITE_REG0, 0);
dphy_pll_write(dsi, DSI_WRITE_REG1, 0);
}
static int dphy_pll_init(struct imx93_dsi *dsi)
{
int ret;
ret = clk_prepare_enable(dsi->clk_cfg);
if (ret < 0) {
dev_err(dsi->dev, "failed to enable config clock: %d\n", ret);
return ret;
}
dphy_pll_clear_reg(dsi);
return 0;
}
static void dphy_pll_uninit(struct imx93_dsi *dsi)
{
dphy_pll_clear_reg(dsi);
clk_disable_unprepare(dsi->clk_cfg);
}
static void dphy_pll_power_off(struct imx93_dsi *dsi)
{
dphy_pll_clear_reg(dsi);
clk_disable_unprepare(dsi->clk_ref);
}
static int imx93_dsi_get_phy_configure_opts(struct imx93_dsi *dsi,
const struct drm_display_mode *mode,
union phy_configure_opts *phy_cfg,
u32 lanes, u32 format)
{
struct device *dev = dsi->dev;
int bpp;
int ret;
bpp = mipi_dsi_pixel_format_to_bpp(format);
if (bpp < 0) {
dev_dbg(dev, "failed to get bpp for pixel format %d\n", format);
return -EINVAL;
}
ret = phy_mipi_dphy_get_default_config(mode->clock * MSEC_PER_SEC, bpp,
lanes, &phy_cfg->mipi_dphy);
if (ret < 0) {
dev_dbg(dev, "failed to get default phy cfg %d\n", ret);
return ret;
}
return 0;
}
static enum drm_mode_status
imx93_dsi_validate_mode(struct imx93_dsi *dsi, const struct drm_display_mode *mode)
{
struct drm_bridge *bridge = dw_mipi_dsi_get_bridge(dsi->dmd);
/* Get the last bridge */
while (drm_bridge_get_next_bridge(bridge))
bridge = drm_bridge_get_next_bridge(bridge);
if ((bridge->ops & DRM_BRIDGE_OP_DETECT) &&
(bridge->ops & DRM_BRIDGE_OP_EDID)) {
unsigned long pixel_clock_rate = mode->clock * 1000;
unsigned long rounded_rate;
/* Allow +/-0.5% pixel clock rate deviation */
rounded_rate = clk_round_rate(dsi->clk_pixel, pixel_clock_rate);
if (rounded_rate < pixel_clock_rate * 995 / 1000 ||
rounded_rate > pixel_clock_rate * 1005 / 1000) {
dev_dbg(dsi->dev, "failed to round clock for mode " DRM_MODE_FMT "\n",
DRM_MODE_ARG(mode));
return MODE_NOCLOCK;
}
}
return MODE_OK;
}
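Worked example for the +/-0.5% window above, using an assumed 148,500 kHz mode for illustration: the rounded pixel clock is accepted if it falls between 148,500,000 * 995 / 1000 = 147,757,500 Hz and 148,500,000 * 1005 / 1000 = 149,242,500 Hz.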
static enum drm_mode_status
imx93_dsi_validate_phy(struct imx93_dsi *dsi, const struct drm_display_mode *mode,
unsigned long mode_flags, u32 lanes, u32 format)
{
union phy_configure_opts phy_cfg;
struct dphy_pll_cfg cfg = { 0 };
struct device *dev = dsi->dev;
int ret;
ret = imx93_dsi_get_phy_configure_opts(dsi, mode, &phy_cfg, lanes,
format);
if (ret < 0) {
dev_dbg(dev, "failed to get phy cfg opts %d\n", ret);
return MODE_ERROR;
}
ret = dphy_pll_get_configure_from_opts(dsi, &phy_cfg.mipi_dphy, &cfg);
if (ret < 0) {
dev_dbg(dev, "failed to get phy pll cfg %d\n", ret);
return MODE_NOCLOCK;
}
return MODE_OK;
}
static enum drm_mode_status
imx93_dsi_mode_valid(void *priv_data, const struct drm_display_mode *mode,
unsigned long mode_flags, u32 lanes, u32 format)
{
struct imx93_dsi *dsi = priv_data;
struct device *dev = dsi->dev;
enum drm_mode_status ret;
ret = imx93_dsi_validate_mode(dsi, mode);
if (ret != MODE_OK) {
dev_dbg(dev, "failed to validate mode " DRM_MODE_FMT "\n",
DRM_MODE_ARG(mode));
return ret;
}
ret = imx93_dsi_validate_phy(dsi, mode, mode_flags, lanes, format);
if (ret != MODE_OK) {
dev_dbg(dev, "failed to validate phy for mode " DRM_MODE_FMT "\n",
DRM_MODE_ARG(mode));
return ret;
}
return MODE_OK;
}
static bool imx93_dsi_mode_fixup(void *priv_data,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct imx93_dsi *dsi = priv_data;
unsigned long pixel_clock_rate;
unsigned long rounded_rate;
pixel_clock_rate = mode->clock * 1000;
rounded_rate = clk_round_rate(dsi->clk_pixel, pixel_clock_rate);
memcpy(adjusted_mode, mode, sizeof(*mode));
adjusted_mode->clock = rounded_rate / 1000;
dev_dbg(dsi->dev, "adj clock %d for mode " DRM_MODE_FMT "\n",
adjusted_mode->clock, DRM_MODE_ARG(mode));
return true;
}
static u32 *imx93_dsi_get_input_bus_fmts(void *priv_data,
struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
u32 output_fmt,
unsigned int *num_input_fmts)
{
u32 *input_fmts, input_fmt;
*num_input_fmts = 0;
switch (output_fmt) {
case MEDIA_BUS_FMT_RGB888_1X24:
case MEDIA_BUS_FMT_RGB666_1X18:
case MEDIA_BUS_FMT_FIXED:
input_fmt = MEDIA_BUS_FMT_RGB888_1X24;
break;
case MEDIA_BUS_FMT_RGB565_1X16:
input_fmt = output_fmt;
break;
default:
return NULL;
}
input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
if (!input_fmts)
return NULL;
input_fmts[0] = input_fmt;
*num_input_fmts = 1;
return input_fmts;
}
static int imx93_dsi_phy_init(void *priv_data)
{
struct imx93_dsi *dsi = priv_data;
unsigned int fmt = 0;
int ret;
switch (dsi->format) {
case MIPI_DSI_FMT_RGB888:
fmt = RGB888_TO_RGB888;
break;
case MIPI_DSI_FMT_RGB666:
fmt = RGB888_TO_RGB666;
regmap_update_bits(dsi->regmap, DISPLAY_MUX,
MIPI_DSI_RGB666_MAP_CFG, RGB666_CONFIG2);
break;
case MIPI_DSI_FMT_RGB666_PACKED:
fmt = RGB888_TO_RGB666;
regmap_update_bits(dsi->regmap, DISPLAY_MUX,
MIPI_DSI_RGB666_MAP_CFG, RGB666_CONFIG1);
break;
case MIPI_DSI_FMT_RGB565:
fmt = RGB565_TO_RGB565;
regmap_update_bits(dsi->regmap, DISPLAY_MUX,
MIPI_DSI_RGB565_MAP_CFG, RGB565_CONFIG1);
break;
}
regmap_update_bits(dsi->regmap, DISPLAY_MUX, LCDIF_CROSS_LINE_PATTERN, fmt);
ret = dphy_pll_init(dsi);
if (ret < 0) {
dev_err(dsi->dev, "failed to init phy pll: %d\n", ret);
return ret;
}
ret = dphy_pll_configure(dsi, &dsi->phy_cfg);
if (ret < 0) {
dev_err(dsi->dev, "failed to configure phy pll: %d\n", ret);
dphy_pll_uninit(dsi);
return ret;
}
return 0;
}
static void imx93_dsi_phy_power_off(void *priv_data)
{
struct imx93_dsi *dsi = priv_data;
dphy_pll_power_off(dsi);
dphy_pll_uninit(dsi);
}
static int
imx93_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
unsigned long mode_flags, u32 lanes, u32 format,
unsigned int *lane_mbps)
{
struct imx93_dsi *dsi = priv_data;
union phy_configure_opts phy_cfg;
struct device *dev = dsi->dev;
int ret;
ret = imx93_dsi_get_phy_configure_opts(dsi, mode, &phy_cfg, lanes,
format);
if (ret < 0) {
dev_dbg(dev, "failed to get phy cfg opts %d\n", ret);
return ret;
}
*lane_mbps = DIV_ROUND_UP(phy_cfg.mipi_dphy.hs_clk_rate, USEC_PER_SEC);
memcpy(&dsi->phy_cfg, &phy_cfg, sizeof(phy_cfg));
dev_dbg(dev, "get lane_mbps %u for mode " DRM_MODE_FMT "\n",
*lane_mbps, DRM_MODE_ARG(mode));
return 0;
}
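As a rough illustration of the numbers involved, and assuming phy_mipi_dphy_get_default_config() derives the high-speed lane rate as pixel clock * bits-per-pixel / lanes: a 148.5 MHz RGB888 mode on four lanes gives 148,500,000 * 24 / 4 = 891,000,000 Hz, so *lane_mbps comes out as DIV_ROUND_UP(891,000,000, USEC_PER_SEC) = 891.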
/* High-Speed Transition Times */
struct hstt {
unsigned int maxfreq;
struct dw_mipi_dsi_dphy_timing timing;
};
#define HSTT(_maxfreq, _c_lp2hs, _c_hs2lp, _d_lp2hs, _d_hs2lp) \
{ \
.maxfreq = (_maxfreq), \
.timing = { \
.clk_lp2hs = (_c_lp2hs), \
.clk_hs2lp = (_c_hs2lp), \
.data_lp2hs = (_d_lp2hs), \
.data_hs2lp = (_d_hs2lp), \
} \
}
/* DPHY Databook Table A-4 High-Speed Transition Times */
static const struct hstt hstt_table[] = {
HSTT(80, 21, 17, 15, 10),
HSTT(90, 23, 17, 16, 10),
HSTT(100, 22, 17, 16, 10),
HSTT(110, 25, 18, 17, 11),
HSTT(120, 26, 20, 18, 11),
HSTT(130, 27, 19, 19, 11),
HSTT(140, 27, 19, 19, 11),
HSTT(150, 28, 20, 20, 12),
HSTT(160, 30, 21, 22, 13),
HSTT(170, 30, 21, 23, 13),
HSTT(180, 31, 21, 23, 13),
HSTT(190, 32, 22, 24, 13),
HSTT(205, 35, 22, 25, 13),
HSTT(220, 37, 26, 27, 15),
HSTT(235, 38, 28, 27, 16),
HSTT(250, 41, 29, 30, 17),
HSTT(275, 43, 29, 32, 18),
HSTT(300, 45, 32, 35, 19),
HSTT(325, 48, 33, 36, 18),
HSTT(350, 51, 35, 40, 20),
HSTT(400, 59, 37, 44, 21),
HSTT(450, 65, 40, 49, 23),
HSTT(500, 71, 41, 54, 24),
HSTT(550, 77, 44, 57, 26),
HSTT(600, 82, 46, 64, 27),
HSTT(650, 87, 48, 67, 28),
HSTT(700, 94, 52, 71, 29),
HSTT(750, 99, 52, 75, 31),
HSTT(800, 105, 55, 82, 32),
HSTT(850, 110, 58, 85, 32),
HSTT(900, 115, 58, 88, 35),
HSTT(950, 120, 62, 93, 36),
HSTT(1000, 128, 63, 99, 38),
HSTT(1050, 132, 65, 102, 38),
HSTT(1100, 138, 67, 106, 39),
HSTT(1150, 146, 69, 112, 42),
HSTT(1200, 151, 71, 117, 43),
HSTT(1250, 153, 74, 120, 45),
HSTT(1300, 160, 73, 124, 46),
HSTT(1350, 165, 76, 130, 47),
HSTT(1400, 172, 78, 134, 49),
HSTT(1450, 177, 80, 138, 49),
HSTT(1500, 183, 81, 143, 52),
HSTT(1550, 191, 84, 147, 52),
HSTT(1600, 194, 85, 152, 52),
HSTT(1650, 201, 86, 155, 53),
HSTT(1700, 208, 88, 161, 53),
HSTT(1750, 212, 89, 165, 53),
HSTT(1800, 220, 90, 171, 54),
HSTT(1850, 223, 92, 175, 54),
HSTT(1900, 231, 91, 180, 55),
HSTT(1950, 236, 95, 185, 56),
HSTT(2000, 243, 97, 190, 56),
HSTT(2050, 248, 99, 194, 58),
HSTT(2100, 252, 100, 199, 59),
HSTT(2150, 259, 102, 204, 61),
HSTT(2200, 266, 105, 210, 62),
HSTT(2250, 269, 109, 213, 63),
HSTT(2300, 272, 109, 217, 65),
HSTT(2350, 281, 112, 225, 66),
HSTT(2400, 283, 115, 226, 66),
HSTT(2450, 282, 115, 226, 67),
HSTT(2500, 281, 118, 227, 67),
};
static int imx93_dsi_phy_get_timing(void *priv_data, unsigned int lane_mbps,
struct dw_mipi_dsi_dphy_timing *timing)
{
struct imx93_dsi *dsi = priv_data;
struct device *dev = dsi->dev;
int i;
for (i = 0; i < ARRAY_SIZE(hstt_table); i++)
if (lane_mbps <= hstt_table[i].maxfreq)
break;
if (i == ARRAY_SIZE(hstt_table)) {
dev_err(dev, "failed to get phy timing for lane_mbps %u\n",
lane_mbps);
return -EINVAL;
}
*timing = hstt_table[i].timing;
dev_dbg(dev, "get phy timing for %u <= %u (lane_mbps)\n",
lane_mbps, hstt_table[i].maxfreq);
return 0;
}
static const struct dw_mipi_dsi_phy_ops imx93_dsi_phy_ops = {
.init = imx93_dsi_phy_init,
.power_off = imx93_dsi_phy_power_off,
.get_lane_mbps = imx93_dsi_get_lane_mbps,
.get_timing = imx93_dsi_phy_get_timing,
};
static int imx93_dsi_host_attach(void *priv_data, struct mipi_dsi_device *device)
{
struct imx93_dsi *dsi = priv_data;
dsi->format = device->format;
return 0;
}
static const struct dw_mipi_dsi_host_ops imx93_dsi_host_ops = {
.attach = imx93_dsi_host_attach,
};
static int imx93_dsi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct imx93_dsi *dsi;
int ret;
dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
dsi->regmap = syscon_regmap_lookup_by_phandle(np, "fsl,media-blk-ctrl");
if (IS_ERR(dsi->regmap)) {
ret = PTR_ERR(dsi->regmap);
dev_err(dev, "failed to get block ctrl regmap: %d\n", ret);
return ret;
}
dsi->clk_pixel = devm_clk_get(dev, "pix");
if (IS_ERR(dsi->clk_pixel))
return dev_err_probe(dev, PTR_ERR(dsi->clk_pixel),
"failed to get pixel clock\n");
dsi->clk_cfg = devm_clk_get(dev, "phy_cfg");
if (IS_ERR(dsi->clk_cfg))
return dev_err_probe(dev, PTR_ERR(dsi->clk_cfg),
"failed to get phy cfg clock\n");
dsi->clk_ref = devm_clk_get(dev, "phy_ref");
if (IS_ERR(dsi->clk_ref))
return dev_err_probe(dev, PTR_ERR(dsi->clk_ref),
"failed to get phy ref clock\n");
dsi->ref_clk_rate = clk_get_rate(dsi->clk_ref);
if (dsi->ref_clk_rate < REF_CLK_RATE_MIN ||
dsi->ref_clk_rate > REF_CLK_RATE_MAX) {
dev_err(dev, "invalid phy ref clock rate %lu\n",
dsi->ref_clk_rate);
return -EINVAL;
}
dev_dbg(dev, "phy ref clock rate: %lu\n", dsi->ref_clk_rate);
dsi->dev = dev;
dsi->pdata.max_data_lanes = 4;
dsi->pdata.mode_valid = imx93_dsi_mode_valid;
dsi->pdata.mode_fixup = imx93_dsi_mode_fixup;
dsi->pdata.get_input_bus_fmts = imx93_dsi_get_input_bus_fmts;
dsi->pdata.phy_ops = &imx93_dsi_phy_ops;
dsi->pdata.host_ops = &imx93_dsi_host_ops;
dsi->pdata.priv_data = dsi;
platform_set_drvdata(pdev, dsi);
dsi->dmd = dw_mipi_dsi_probe(pdev, &dsi->pdata);
if (IS_ERR(dsi->dmd))
return dev_err_probe(dev, PTR_ERR(dsi->dmd),
"failed to probe dw_mipi_dsi\n");
return 0;
}
static void imx93_dsi_remove(struct platform_device *pdev)
{
struct imx93_dsi *dsi = platform_get_drvdata(pdev);
dw_mipi_dsi_remove(dsi->dmd);
}
static const struct of_device_id imx93_dsi_dt_ids[] = {
{ .compatible = "fsl,imx93-mipi-dsi", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx93_dsi_dt_ids);
static struct platform_driver imx93_dsi_driver = {
.probe = imx93_dsi_probe,
.remove_new = imx93_dsi_remove,
.driver = {
.of_match_table = imx93_dsi_dt_ids,
.name = "imx93_mipi_dsi",
},
};
module_platform_driver(imx93_dsi_driver);
MODULE_DESCRIPTION("Freescale i.MX93 MIPI DSI driver");
MODULE_AUTHOR("Liu Ying <victor.liu@nxp.com>");
MODULE_LICENSE("GPL");

View File

@ -929,9 +929,9 @@ retry:
init_waitqueue_head(&lt9611uxc->wq);
INIT_WORK(&lt9611uxc->work, lt9611uxc_hpd_work);
ret = devm_request_threaded_irq(dev, client->irq, NULL,
lt9611uxc_irq_thread_handler,
IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
ret = request_threaded_irq(client->irq, NULL,
lt9611uxc_irq_thread_handler,
IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
if (ret) {
dev_err(dev, "failed to request irq\n");
goto err_disable_regulators;
@ -967,6 +967,8 @@ retry:
return lt9611uxc_audio_init(dev, lt9611uxc);
err_remove_bridge:
free_irq(client->irq, lt9611uxc);
cancel_work_sync(&lt9611uxc->work);
drm_bridge_remove(&lt9611uxc->bridge);
err_disable_regulators:
@ -983,7 +985,7 @@ static void lt9611uxc_remove(struct i2c_client *client)
{
struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
disable_irq(client->irq);
free_irq(client->irq, lt9611uxc);
cancel_work_sync(&lt9611uxc->work);
lt9611uxc_audio_exit(lt9611uxc);
drm_bridge_remove(&lt9611uxc->bridge);

View File

@ -65,12 +65,11 @@ struct ge_b850v3_lvds {
static struct ge_b850v3_lvds *ge_b850v3_lvds_ptr;
static u8 *stdp2690_get_edid(struct i2c_client *client)
static int stdp2690_read_block(void *context, u8 *buf, unsigned int block, size_t len)
{
struct i2c_client *client = context;
struct i2c_adapter *adapter = client->adapter;
unsigned char start = 0x00;
unsigned int total_size;
u8 *block = kmalloc(EDID_LENGTH, GFP_KERNEL);
unsigned char start = block * EDID_LENGTH;
struct i2c_msg msgs[] = {
{
@ -81,53 +80,15 @@ static u8 *stdp2690_get_edid(struct i2c_client *client)
}, {
.addr = client->addr,
.flags = I2C_M_RD,
.len = EDID_LENGTH,
.buf = block,
.len = len,
.buf = buf,
}
};
if (!block)
return NULL;
if (i2c_transfer(adapter, msgs, 2) != 2)
return -1;
if (i2c_transfer(adapter, msgs, 2) != 2) {
DRM_ERROR("Unable to read EDID.\n");
goto err;
}
if (!drm_edid_block_valid(block, 0, false, NULL)) {
DRM_ERROR("Invalid EDID data\n");
goto err;
}
total_size = (block[EDID_EXT_BLOCK_CNT] + 1) * EDID_LENGTH;
if (total_size > EDID_LENGTH) {
kfree(block);
block = kmalloc(total_size, GFP_KERNEL);
if (!block)
return NULL;
/* Yes, read the entire buffer, and do not skip the first
* EDID_LENGTH bytes.
*/
start = 0x00;
msgs[1].len = total_size;
msgs[1].buf = block;
if (i2c_transfer(adapter, msgs, 2) != 2) {
DRM_ERROR("Unable to read EDID extension blocks.\n");
goto err;
}
if (!drm_edid_block_valid(block, 1, false, NULL)) {
DRM_ERROR("Invalid EDID data\n");
goto err;
}
}
return block;
err:
kfree(block);
return NULL;
return 0;
}
static struct edid *ge_b850v3_lvds_get_edid(struct drm_bridge *bridge,
@ -137,7 +98,7 @@ static struct edid *ge_b850v3_lvds_get_edid(struct drm_bridge *bridge,
client = ge_b850v3_lvds_ptr->stdp2690_i2c;
return (struct edid *)stdp2690_get_edid(client);
return drm_do_get_edid(connector, stdp2690_read_block, client);
}
static int ge_b850v3_lvds_get_modes(struct drm_connector *connector)
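The rework above replaces the open-coded two-pass EDID read with drm_do_get_edid() and a per-block read callback: the helper reads base block 0, checks the extension count itself, and calls the callback again for every extension block, so the bridge only has to translate (block, len) into an I2C transfer. A rough user-space model of that callback contract follows; the fake two-block EDID and the stand-alone main() are assumptions for illustration, not the DRM helper itself.

/*
 * User-space model of the block-read callback contract used above: ask for
 * block 0 first, then for each extension block advertised in byte 126.
 */
#include <stdio.h>
#include <string.h>
#include <stddef.h>

#define EDID_LENGTH 128

/* Fake two-block EDID buffer, purely for illustration. */
static unsigned char fake_edid[2 * EDID_LENGTH];

static int read_block(void *context, unsigned char *buf, unsigned int block,
                      size_t len)
{
        size_t start = block * EDID_LENGTH;  /* same offset math as the bridge */

        memcpy(buf, fake_edid + start, len);
        return 0;
}

int main(void)
{
        unsigned char buf[EDID_LENGTH];
        unsigned int extensions;

        fake_edid[126] = 1;                  /* base block reports one extension */

        read_block(NULL, buf, 0, sizeof(buf));
        extensions = buf[126];
        for (unsigned int i = 1; i <= extensions; i++)
                read_block(NULL, buf, i, sizeof(buf));

        printf("read %u block(s)\n", extensions + 1);
        return 0;
}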

View File

@ -12,6 +12,8 @@
#include <linux/component.h>
#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@ -22,6 +24,7 @@
#include <drm/bridge/dw_mipi_dsi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
@ -538,6 +541,59 @@ static const struct mipi_dsi_host_ops dw_mipi_dsi_host_ops = {
.transfer = dw_mipi_dsi_host_transfer,
};
static u32 *
dw_mipi_dsi_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
u32 output_fmt,
unsigned int *num_input_fmts)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
const struct dw_mipi_dsi_plat_data *pdata = dsi->plat_data;
u32 *input_fmts;
if (pdata->get_input_bus_fmts)
return pdata->get_input_bus_fmts(pdata->priv_data,
bridge, bridge_state,
crtc_state, conn_state,
output_fmt, num_input_fmts);
/* Fall back to MEDIA_BUS_FMT_FIXED as the only input format. */
input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
if (!input_fmts)
return NULL;
input_fmts[0] = MEDIA_BUS_FMT_FIXED;
*num_input_fmts = 1;
return input_fmts;
}
static int dw_mipi_dsi_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
const struct dw_mipi_dsi_plat_data *pdata = dsi->plat_data;
bool ret;
bridge_state->input_bus_cfg.flags =
DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
if (pdata->mode_fixup) {
ret = pdata->mode_fixup(pdata->priv_data, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
DRM_DEBUG_DRIVER("failed to fixup mode " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&crtc_state->mode));
return -EINVAL;
}
}
return 0;
}
static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi)
{
u32 val;
@ -630,7 +686,7 @@ static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi)
* timeout clock division should be computed with the
* high speed transmission counter timeout and byte lane...
*/
dsi_write(dsi, DSI_CLKMGR_CFG, TO_CLK_DIVISION(10) |
dsi_write(dsi, DSI_CLKMGR_CFG, TO_CLK_DIVISION(0) |
TX_ESC_CLK_DIVISION(esc_clk_division));
}
@ -693,7 +749,7 @@ static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi)
* compute high speed transmission counter timeout according
* to the timeout clock division (TO_CLK_DIVISION) and byte lane...
*/
dsi_write(dsi, DSI_TO_CNT_CFG, HSTX_TO_CNT(1000) | LPRX_TO_CNT(1000));
dsi_write(dsi, DSI_TO_CNT_CFG, HSTX_TO_CNT(0) | LPRX_TO_CNT(0));
/*
* TODO dw drv improvements
* the Bus-Turn-Around Timeout Counter should be computed
@ -703,20 +759,45 @@ static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi)
dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
}
static const u32 minimum_lbccs[] = {10, 5, 4, 3};
static inline u32 dw_mipi_dsi_get_minimum_lbcc(struct dw_mipi_dsi *dsi)
{
return minimum_lbccs[dsi->lanes - 1];
}
/* Get lane byte clock cycles. */
static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi,
const struct drm_display_mode *mode,
u32 hcomponent)
{
u32 frac, lbcc;
u32 frac, lbcc, minimum_lbcc;
int bpp;
lbcc = hcomponent * dsi->lane_mbps * MSEC_PER_SEC / 8;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
/* lbcc based on lane_mbps */
lbcc = hcomponent * dsi->lane_mbps * MSEC_PER_SEC / 8;
} else {
/* lbcc based on pixel clock rate */
bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
if (bpp < 0) {
dev_err(dsi->dev, "failed to get bpp\n");
return 0;
}
lbcc = div_u64((u64)hcomponent * mode->clock * bpp, dsi->lanes * 8);
}
frac = lbcc % mode->clock;
lbcc = lbcc / mode->clock;
if (frac)
lbcc++;
minimum_lbcc = dw_mipi_dsi_get_minimum_lbcc(dsi);
if (lbcc < minimum_lbcc)
lbcc = minimum_lbcc;
return lbcc;
}
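The hunk above switches the non-burst lane-byte-clock-cycle (lbcc) calculation from the PHY lane rate to the actual pixel throughput, pixel clock * bpp / (lanes * 8), and clamps the result to a per-lane-count minimum. A minimal stand-alone sketch of the non-burst arithmetic follows; the 44-pixel HSA, RGB888 and 4-lane figures are assumptions picked only to show the numbers.

/*
 * Non-burst lane-byte-clock-cycle computation, modelled after the hunk
 * above. The pixel clock cancels out of the ratio, so it is left implicit.
 */
#include <stdio.h>

static const unsigned int minimum_lbccs[] = { 10, 5, 4, 3 };

static unsigned int lbcc_non_burst(unsigned int hcomponent, unsigned int bpp,
                                   unsigned int lanes)
{
        /* cycles = ceil(hcomponent * bpp / (lanes * 8)), at least the minimum */
        unsigned int den = lanes * 8;
        unsigned int lbcc = (hcomponent * bpp + den - 1) / den;
        unsigned int minimum = minimum_lbccs[lanes - 1];

        return lbcc < minimum ? minimum : lbcc;
}

int main(void)
{
        /* 44 * 24 / 32 = 33 byte-clock cycles, above the 4-lane minimum of 3. */
        printf("HSA lbcc = %u\n", lbcc_non_burst(44, 24, 4));
        return 0;
}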
@ -1006,6 +1087,8 @@ static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge,
static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_get_input_bus_fmts = dw_mipi_dsi_bridge_atomic_get_input_bus_fmts,
.atomic_check = dw_mipi_dsi_bridge_atomic_check,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_pre_enable = dw_mipi_dsi_bridge_atomic_pre_enable,
.atomic_enable = dw_mipi_dsi_bridge_atomic_enable,
@ -1209,6 +1292,12 @@ void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave)
}
EXPORT_SYMBOL_GPL(dw_mipi_dsi_set_slave);
struct drm_bridge *dw_mipi_dsi_get_bridge(struct dw_mipi_dsi *dsi)
{
return &dsi->bridge;
}
EXPORT_SYMBOL_GPL(dw_mipi_dsi_get_bridge);
/*
* Probe/remove API, used from platforms based on the DRM bridge API.
*/

View File

@ -24,6 +24,7 @@ CONFIG_DRM_LIMA=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_PWM_CROS_EC=y
CONFIG_BACKLIGHT_PWM=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_ROCKCHIP_CDN_DP=n

View File

@ -26,6 +26,7 @@ CONFIG_DRM_ETNAVIV=y
CONFIG_DRM_I2C_ADV7511=y
CONFIG_PWM_CROS_EC=y
CONFIG_BACKLIGHT_PWM=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_ROCKCHIP_CDN_DP=n
@ -61,6 +62,7 @@ CONFIG_PHY_QCOM_QUSB2=y
CONFIG_PHY_QCOM_QMP=y
CONFIG_MSM_GCC_8996=y
CONFIG_QCOM_CLK_APCC_MSM8996=y
CONFIG_MSM_MMCC_8996=y
CONFIG_QCOM_LLCC=y
CONFIG_QCOM_LMH=y
CONFIG_QCOM_SPMI_TEMP_ALARM=y

View File

@ -35,7 +35,7 @@ elif [[ "$KERNEL_ARCH" = "arm" ]]; then
apt-get install -y libssl-dev:armhf
else
GCC_ARCH="x86_64-linux-gnu"
DEBIAN_ARCH="x86_64"
DEBIAN_ARCH="amd64"
DEVICE_TREES=""
fi
@ -64,10 +64,15 @@ if [ "$(git ls-remote --exit-code --heads ${UPSTREAM_REPO} ${TARGET_BRANCH}-exte
fi
# Try to merge fixes from local repo if this isn't a merge request
# otherwise try merging the fixes from the merge target
if [ -z "$CI_MERGE_REQUEST_PROJECT_PATH" ]; then
if [ "$(git ls-remote --exit-code --heads origin ${TARGET_BRANCH}-external-fixes)" ]; then
git pull origin ${TARGET_BRANCH}-external-fixes
fi
else
if [ "$(git ls-remote --exit-code --heads ${CI_MERGE_REQUEST_PROJECT_URL} ${CI_MERGE_REQUEST_TARGET_BRANCH_NAME}-external-fixes)" ]; then
git pull ${CI_MERGE_REQUEST_PROJECT_URL} ${CI_MERGE_REQUEST_TARGET_BRANCH_NAME}-external-fixes
fi
fi
for opt in $ENABLE_KCONFIGS; do
@ -148,6 +153,7 @@ mkdir -p artifacts/install/lib
mv install/* artifacts/install/.
rm -rf artifacts/install/modules
ln -s common artifacts/install/ci-common
cp .config artifacts/${CI_JOB_NAME}_config
for image in ${KERNEL_IMAGE_NAME}; do
cp /lava-files/$image artifacts/install/.

View File

@ -1,6 +1,7 @@
.build:
extends:
- .build-rules
- .container+build-rules
stage: build
artifacts:
paths:

View File

@ -1,11 +1,11 @@
variables:
DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 0dc961645c4f0241f8512cb0ec3ad59635842072
DRM_CI_COMMIT_SHA: &drm-ci-commit-sha edfbf74df1d4d6ce54ffe24566108be0e1a98c3d
UPSTREAM_REPO: git://anongit.freedesktop.org/drm/drm
TARGET_BRANCH: drm-next
IGT_VERSION: 471bfababd070e1dac0ebb87470ac4f2ae85e663
IGT_VERSION: d1db7333d9c5fbbb05e50b0804123950d9dc1c46
DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/anholt/deqp-runner.git
DEQP_RUNNER_GIT_TAG: v0.15.0
@ -24,7 +24,9 @@ variables:
PIPELINE_ARTIFACTS_BASE: ${S3_HOST}/artifacts/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
# per-job artifact storage on MinIO
JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
# default kernel for rootfs before injecting the current kernel tree
KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/gfx-ci/linux/v6.4.12-for-mesa-ci-f6b4ad45f48d
LAVA_TAGS: subset-1-gfx
LAVA_JOB_PRIORITY: 30
default:
@ -53,20 +55,6 @@ default:
export CI_JOB_JWT="$(<${CI_JOB_JWT_FILE})" &&
rm "${CI_JOB_JWT_FILE}"
# Retry when job fails.
retry:
max: 1
# Ignore runner_unsupported, stale_schedule, archived_failure, or
# unmet_prerequisites
when:
- api_failure
- runner_system_failure
- script_failure
- job_execution_timeout
- scheduler_failure
- data_integrity_failure
- unknown_failure
include:
- project: 'freedesktop/ci-templates'
ref: 16bc29078de5e0a067ff84a1a199a3760d3b3811
@ -86,6 +74,17 @@ include:
- '/.gitlab-ci/container/gitlab-ci.yml'
- '/.gitlab-ci/test/gitlab-ci.yml'
- '/.gitlab-ci/lava/lava-gitlab-ci.yml'
- '/src/microsoft/ci/gitlab-ci-inc.yml'
- '/src/gallium/drivers/zink/ci/gitlab-ci-inc.yml'
- '/src/gallium/drivers/crocus/ci/gitlab-ci-inc.yml'
- '/src/gallium/drivers/softpipe/ci/gitlab-ci-inc.yml'
- '/src/gallium/drivers/llvmpipe/ci/gitlab-ci-inc.yml'
- '/src/gallium/drivers/virgl/ci/gitlab-ci-inc.yml'
- '/src/gallium/drivers/nouveau/ci/gitlab-ci-inc.yml'
- '/src/gallium/frontends/lavapipe/ci/gitlab-ci-inc.yml'
- '/src/intel/ci/gitlab-ci-inc.yml'
- '/src/freedreno/ci/gitlab-ci-inc.yml'
- '/src/amd/ci/gitlab-ci-inc.yml'
- drivers/gpu/drm/ci/image-tags.yml
- drivers/gpu/drm/ci/container.yml
- drivers/gpu/drm/ci/static-checks.yml
@ -154,6 +153,11 @@ stages:
# Run automatically once all dependency jobs have passed
- when: on_success
# When to automatically run the CI for container jobs
.container+build-rules:
rules:
- !reference [.no_scheduled_pipelines-rules, rules]
- when: manual
.ci-deqp-artifacts:
artifacts:

View File

@ -20,11 +20,16 @@ set +e
cat /sys/kernel/debug/dri/*/state
set -e
# Cannot use HWCI_KERNEL_MODULES as at that point we don't have the module in /lib
if [ "$IGT_FORCE_DRIVER" = "amdgpu" ]; then
mv /install/modules/lib/modules/* /lib/modules/.
modprobe amdgpu
fi
case "$DRIVER_NAME" in
rockchip|mediatek|meson)
export IGT_FORCE_DRIVER="panfrost"
;;
amdgpu)
# Cannot use HWCI_KERNEL_MODULES as at that point we don't have the module in /lib
mv /install/modules/lib/modules/* /lib/modules/.
modprobe amdgpu
;;
esac
if [ -e "/install/xfails/$DRIVER_NAME-$GPU_VERSION-skips.txt" ]; then
IGT_SKIPS="--skips /install/xfails/$DRIVER_NAME-$GPU_VERSION-skips.txt"
@ -48,6 +53,20 @@ fi
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s ${FDO_HTTP_CACHE_URI:-}$PIPELINE_ARTIFACTS_BASE/$ARCH/igt.tar.gz | tar --zstd -v -x -C /
# If the job is parallel at the gitlab job level, take the corresponding fraction
# of the caselist.
if [ -n "$CI_NODE_INDEX" ]; then
sed -ni $CI_NODE_INDEX~$CI_NODE_TOTAL"p" /install/testlist.txt
fi
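The GNU sed first~step address used above keeps every CI_NODE_TOTAL-th line starting at line CI_NODE_INDEX. For example, with CI_NODE_INDEX=2 and CI_NODE_TOTAL=4, sed -n '2~4p' testlist.txt would print lines 2, 6, 10 and so on; the in-place -ni form above rewrites the testlist down to that subset.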
# core_getversion checks if the driver is loaded and probed correctly
# so run it in all shards
if ! grep -q "core_getversion" /install/testlist.txt; then
# Add the line to the file
echo "core_getversion" >> /install/testlist.txt
fi
set +e
igt-runner \
run \
@ -57,8 +76,6 @@ igt-runner \
$IGT_SKIPS \
$IGT_FLAKES \
$IGT_FAILS \
--fraction-start $CI_NODE_INDEX \
--fraction $CI_NODE_TOTAL \
--jobs 1
ret=$?
set -e

View File

@ -1,12 +1,12 @@
variables:
CONTAINER_TAG: "2023-08-10-mesa-uprev"
CONTAINER_TAG: "2023-10-11-mesa-uprev"
DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
DEBIAN_BASE_TAG: "${CONTAINER_TAG}"
DEBIAN_X86_64_BUILD_IMAGE_PATH: "debian/x86_64_build"
DEBIAN_BUILD_TAG: "${CONTAINER_TAG}"
DEBIAN_BUILD_TAG: "2023-10-08-config"
KERNEL_ROOTFS_TAG: "${CONTAINER_TAG}"
KERNEL_ROOTFS_TAG: "2023-10-06-amd"
DEBIAN_X86_64_TEST_BASE_IMAGE: "debian/x86_64_test-base"
DEBIAN_X86_64_TEST_IMAGE_GL_PATH: "debian/x86_64_test-gl"

View File

@ -22,7 +22,7 @@ cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
# Prepare env vars for upload.
section_start variables "Variables passed through:"
KERNEL_IMAGE_BASE_URL="https://${BASE_SYSTEM_HOST_PATH}" \
KERNEL_IMAGE_BASE="https://${BASE_SYSTEM_HOST_PATH}" \
artifacts/ci-common/generate-env.sh | tee results/job-rootfs-overlay/set-job-env-vars.sh
section_end variables
@ -37,8 +37,8 @@ PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
--dump-yaml \
--pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \
--rootfs-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
--kernel-url-prefix "https://${PIPELINE_ARTIFACTS_BASE}/${ARCH}" \
--build-url "${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${ARCH}/kernel-files.tar.zst" \
--kernel-url-prefix "https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}" \
--build-url "${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/kernel-files.tar.zst" \
--job-rootfs-overlay-url "${FDO_HTTP_CACHE_URI:-}https://${JOB_ROOTFS_OVERLAY_PATH}" \
--job-timeout-min ${JOB_TIMEOUT:-80} \
--first-stage-init artifacts/ci-common/init-stage1.sh \

View File

@ -23,7 +23,7 @@
- .lava-test:arm32
variables:
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
ARCH: "armhf"
DEBIAN_ARCH: "armhf"
dependencies:
- testing:arm32
needs:
@ -38,7 +38,7 @@
- .lava-test:arm64
variables:
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
ARCH: "arm64"
DEBIAN_ARCH: "arm64"
dependencies:
- testing:arm64
needs:
@ -53,7 +53,7 @@
- .lava-test:x86_64
variables:
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
ARCH: "x86_64"
DEBIAN_ARCH: "amd64"
dependencies:
- testing:x86_64
needs:
@ -86,7 +86,7 @@ msm:sc7180:
extends:
- .lava-igt:arm64
stage: msm
parallel: 2
parallel: 4
variables:
DRIVER_NAME: msm
DEVICE_TYPE: sc7180-trogdor-lazor-limozeen
@ -155,7 +155,7 @@ rockchip:rk3399:
extends:
- .lava-igt:arm64
stage: rockchip
parallel: 3
parallel: 2
variables:
DRIVER_NAME: rockchip
DEVICE_TYPE: rk3399-gru-kevin
@ -178,7 +178,8 @@ rockchip:rk3399:
i915:apl:
extends:
- .i915
parallel: 12
parallel: 3
timeout: "1h30m"
variables:
DEVICE_TYPE: asus-C523NA-A20057-coral
GPU_VERSION: apl
@ -187,7 +188,8 @@ i915:apl:
i915:glk:
extends:
- .i915
parallel: 5
parallel: 2
timeout: "1h30m"
variables:
DEVICE_TYPE: hp-x360-12b-ca0010nr-n4020-octopus
GPU_VERSION: glk
@ -196,7 +198,8 @@ i915:glk:
i915:amly:
extends:
- .i915
parallel: 8
parallel: 2
timeout: "1h30m"
variables:
DEVICE_TYPE: asus-C433TA-AJ0005-rammus
GPU_VERSION: amly
@ -205,7 +208,7 @@ i915:amly:
i915:kbl:
extends:
- .i915
parallel: 5
parallel: 3
variables:
DEVICE_TYPE: hp-x360-14-G1-sona
GPU_VERSION: kbl
@ -214,7 +217,8 @@ i915:kbl:
i915:whl:
extends:
- .i915
parallel: 8
parallel: 2
timeout: "1h30m"
variables:
DEVICE_TYPE: dell-latitude-5400-8665U-sarien
GPU_VERSION: whl
@ -223,7 +227,8 @@ i915:whl:
i915:cml:
extends:
- .i915
parallel: 6
parallel: 2
timeout: "1h30m"
variables:
DEVICE_TYPE: asus-C436FA-Flip-hatch
GPU_VERSION: cml
@ -232,7 +237,7 @@ i915:cml:
i915:tgl:
extends:
- .i915
parallel: 6
parallel: 8
variables:
DEVICE_TYPE: asus-cx9400-volteer
GPU_VERSION: tgl
@ -251,6 +256,7 @@ i915:tgl:
amdgpu:stoney:
extends:
- .amdgpu
parallel: 2
variables:
DEVICE_TYPE: hp-11A-G6-EE-grunt
GPU_VERSION: stoney
@ -269,6 +275,7 @@ amdgpu:stoney:
mediatek:mt8173:
extends:
- .mediatek
parallel: 4
variables:
DEVICE_TYPE: mt8173-elm-hana
GPU_VERSION: mt8173
@ -280,6 +287,7 @@ mediatek:mt8173:
mediatek:mt8183:
extends:
- .mediatek
parallel: 3
variables:
DEVICE_TYPE: mt8183-kukui-jacuzzi-juniper-sku16
GPU_VERSION: mt8183
@ -289,6 +297,7 @@ mediatek:mt8183:
.mediatek:mt8192:
extends:
- .mediatek
parallel: 3
variables:
DEVICE_TYPE: mt8192-asurada-spherion-r0
GPU_VERSION: mt8192
@ -307,6 +316,7 @@ mediatek:mt8183:
meson:g12b:
extends:
- .meson
parallel: 3
variables:
DEVICE_TYPE: meson-g12b-a311d-khadas-vim3
GPU_VERSION: g12b

View File

@ -1,8 +1,14 @@
kms_addfb_basic@bad-pitch-65536,Fail
kms_addfb_basic@bo-too-small,Fail
kms_addfb_basic@too-high,Fail
kms_async_flips@async-flip-with-page-flip-events,Fail
kms_async_flips@crc,Fail
kms_async_flips@invalid-async-flip,Fail
kms_atomic@plane-immutable-zpos,Fail
kms_atomic_transition@plane-all-modeset-transition-internal-panels,Fail
kms_atomic_transition@plane-all-transition,Fail
kms_atomic_transition@plane-all-transition-nonblocking,Fail
kms_atomic_transition@plane-toggle-modeset-transition,Fail
kms_atomic_transition@plane-use-after-nonblocking-unbind,Fail
kms_bw@linear-tiling-1-displays-2560x1440p,Fail
kms_bw@linear-tiling-1-displays-3840x2160p,Fail
kms_bw@linear-tiling-2-displays-3840x2160p,Fail
@ -11,9 +17,11 @@ kms_color@degamma,Fail
kms_cursor_crc@cursor-size-change,Fail
kms_cursor_crc@pipe-A-cursor-size-change,Fail
kms_cursor_crc@pipe-B-cursor-size-change,Fail
kms_cursor_legacy@forked-move,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_hdr@bpc-switch,Fail
kms_hdr@bpc-switch-dpms,Fail
kms_plane@pixel-format,Fail
kms_plane_multiple@atomic-pipe-A-tiling-none,Fail
kms_rmfb@close-fd,Fail
kms_rotation_crc@primary-rotation-180,Fail

View File

@ -1,21 +1 @@
kms_addfb_basic@too-high
kms_async_flips@alternate-sync-async-flip
kms_async_flips@async-flip-with-page-flip-events
kms_async_flips@crc
kms_async_flips@test-cursor
kms_async_flips@test-time-stamp
kms_atomic_transition@plane-all-modeset-transition-internal-panels
kms_atomic_transition@plane-all-transition
kms_atomic_transition@plane-use-after-nonblocking-unbind
kms_bw@linear-tiling-1-displays-1920x1080p
kms_bw@linear-tiling-2-displays-1920x1080p
kms_bw@linear-tiling-2-displays-2560x1440p
kms_bw@linear-tiling-3-displays-2560x1440p
kms_bw@linear-tiling-3-displays-3840x2160p
kms_cursor_crc@pipe-A-cursor-alpha-opaque
kms_cursor_crc@pipe-B-cursor-alpha-opaque
kms_plane@pixel-format
kms_plane_multiple@atomic-pipe-B-tiling-none
kms_plane_scaling@downscale-with-rotation-factor-0-5
kms_universal_plane@disable-primary-vs-flip-pipe-A
kms_universal_plane@disable-primary-vs-flip-pipe-B

View File

@ -2,6 +2,10 @@ kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
@ -10,7 +14,12 @@ kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail

View File

@ -1,32 +0,0 @@
kms_bw@linear-tiling-2-displays-1920x1080p
kms_bw@linear-tiling-2-displays-2560x1440p
kms_bw@linear-tiling-2-displays-3840x2160p
kms_bw@linear-tiling-3-displays-1920x1080p
kms_bw@linear-tiling-3-displays-2560x1440p
kms_bw@linear-tiling-3-displays-3840x2160p
kms_bw@linear-tiling-4-displays-1920x1080p
kms_bw@linear-tiling-4-displays-2560x1440p
kms_bw@linear-tiling-4-displays-3840x2160p
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling
kms_plane_alpha_blend@pipe-A-alpha-basic
kms_plane_alpha_blend@pipe-A-alpha-opaque-fb
kms_plane_alpha_blend@pipe-A-alpha-transparent-fb
kms_plane_alpha_blend@pipe-A-constant-alpha-max
kms_plane_alpha_blend@pipe-B-alpha-basic
kms_plane_alpha_blend@pipe-B-alpha-opaque-fb
kms_plane_alpha_blend@pipe-B-alpha-transparent-fb
kms_plane_alpha_blend@pipe-B-constant-alpha-max
kms_plane_alpha_blend@pipe-C-alpha-basic
kms_plane_alpha_blend@pipe-C-alpha-opaque-fb
kms_plane_alpha_blend@pipe-C-alpha-transparent-fb
kms_plane_alpha_blend@pipe-C-constant-alpha-max
kms_sysfs_edid_timing

View File

@ -8,13 +8,6 @@ kms_bw@linear-tiling-3-displays-3840x2160p,Fail
kms_bw@linear-tiling-4-displays-1920x1080p,Fail
kms_bw@linear-tiling-4-displays-2560x1440p,Fail
kms_bw@linear-tiling-4-displays-3840x2160p,Fail
kms_color@ctm-0-25,Fail
kms_color@ctm-0-50,Fail
kms_color@ctm-0-75,Fail
kms_color@ctm-max,Fail
kms_color@ctm-negative,Fail
kms_color@ctm-red-to-blue,Fail
kms_color@ctm-signed,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
@ -38,8 +31,6 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling,Fail
kms_hdmi_inject@inject-4k,Timeout
kms_plane@plane-position-hole,Timeout
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
@ -53,6 +44,4 @@ kms_plane_alpha_blend@pipe-B-constant-alpha-max,Fail
kms_plane_alpha_blend@pipe-C-alpha-opaque-fb,Fail
kms_plane_alpha_blend@pipe-C-alpha-transparent-fb,Fail
kms_plane_alpha_blend@pipe-C-constant-alpha-max,Fail
kms_plane_multiple@tiling-y,Timeout
kms_pwrite_crc,Timeout
kms_sysfs_edid_timing,Fail

View File

@ -1 +0,0 @@
kms_frontbuffer_tracking@fbc-tiling-linear

View File

@ -1,8 +1,11 @@
kms_color@ctm-0-25,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
@ -11,8 +14,17 @@ kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
kms_plane_alpha_blend@constant-alpha-min,Fail
kms_psr2_su@page_flip-NV12,Fail
kms_psr2_su@page_flip-P010,Fail
kms_setmode@basic,Fail

View File

@ -1,38 +0,0 @@
kms_bw@linear-tiling-2-displays-1920x1080p
kms_bw@linear-tiling-2-displays-2560x1440p
kms_bw@linear-tiling-2-displays-3840x2160p
kms_bw@linear-tiling-3-displays-1920x1080p
kms_bw@linear-tiling-3-displays-2560x1440p
kms_bw@linear-tiling-3-displays-3840x2160p
kms_bw@linear-tiling-4-displays-1920x1080p
kms_bw@linear-tiling-4-displays-2560x1440p
kms_bw@linear-tiling-4-displays-3840x2160p
kms_draw_crc@draw-method-xrgb8888-render-xtiled
kms_flip@flip-vs-suspend
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling
kms_hdr@bpc-switch-suspend
kms_plane_alpha_blend@constant-alpha-min
kms_plane_alpha_blend@pipe-A-alpha-basic
kms_plane_alpha_blend@pipe-A-alpha-opaque-fb
kms_plane_alpha_blend@pipe-A-alpha-transparent-fb
kms_plane_alpha_blend@pipe-A-constant-alpha-max
kms_plane_alpha_blend@pipe-B-alpha-basic
kms_plane_alpha_blend@pipe-B-alpha-opaque-fb
kms_plane_alpha_blend@pipe-B-alpha-transparent-fb
kms_plane_alpha_blend@pipe-B-constant-alpha-max
kms_plane_alpha_blend@pipe-C-alpha-basic
kms_plane_alpha_blend@pipe-C-alpha-opaque-fb
kms_plane_alpha_blend@pipe-C-alpha-transparent-fb
kms_plane_alpha_blend@pipe-C-constant-alpha-max
kms_psr2_su@page_flip-NV12
kms_psr2_su@page_flip-P010
kms_setmode@basic

View File

@ -1,8 +1,15 @@
kms_fbcon_fbt@fbc,Fail
kms_flip@blocking-wf_vblank,Fail
kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
@ -11,9 +18,19 @@ kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_frontbuffer_tracking@fbcdrrs-tiling-linear,Fail
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
kms_rotation_crc@multiplane-rotation,Fail
kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
kms_setmode@basic,Fail

View File

@ -1,41 +0,0 @@
kms_bw@linear-tiling-1-displays-3840x2160p
kms_bw@linear-tiling-2-displays-1920x1080p
kms_bw@linear-tiling-2-displays-2560x1440p
kms_bw@linear-tiling-2-displays-3840x2160p
kms_bw@linear-tiling-3-displays-1920x1080p
kms_bw@linear-tiling-3-displays-2560x1440p
kms_bw@linear-tiling-3-displays-3840x2160p
kms_bw@linear-tiling-4-displays-1920x1080p
kms_bw@linear-tiling-4-displays-2560x1440p
kms_bw@linear-tiling-4-displays-3840x2160p
kms_flip@blocking-wf_vblank
kms_flip@wf_vblank-ts-check
kms_flip@wf_vblank-ts-check-interruptible
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling
kms_frontbuffer_tracking@fbc-tiling-linear
kms_plane_alpha_blend@pipe-A-alpha-basic
kms_plane_alpha_blend@pipe-A-alpha-opaque-fb
kms_plane_alpha_blend@pipe-A-alpha-transparent-fb
kms_plane_alpha_blend@pipe-A-constant-alpha-max
kms_plane_alpha_blend@pipe-B-alpha-basic
kms_plane_alpha_blend@pipe-B-alpha-opaque-fb
kms_plane_alpha_blend@pipe-B-alpha-transparent-fb
kms_plane_alpha_blend@pipe-B-constant-alpha-max
kms_plane_alpha_blend@pipe-C-alpha-basic
kms_plane_alpha_blend@pipe-C-alpha-opaque-fb
kms_plane_alpha_blend@pipe-C-alpha-transparent-fb
kms_plane_alpha_blend@pipe-C-constant-alpha-max
kms_prop_blob@invalid-set-prop-any
kms_rotation_crc@multiplane-rotation
kms_rotation_crc@multiplane-rotation-cropping-bottom
kms_rotation_crc@multiplane-rotation-cropping-top
kms_setmode@basic

View File

@ -4,7 +4,10 @@ kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
@ -13,8 +16,12 @@ kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling,Fail
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail

View File

@ -1,26 +1 @@
kms_async_flips@crc
kms_bw@linear-tiling-2-displays-1920x1080p
kms_bw@linear-tiling-2-displays-3840x2160p
kms_bw@linear-tiling-3-displays-1920x1080p
kms_bw@linear-tiling-3-displays-2560x1440p
kms_bw@linear-tiling-3-displays-3840x2160p
kms_bw@linear-tiling-4-displays-1920x1080p
kms_bw@linear-tiling-4-displays-3840x2160p
kms_color@ctm-0-25
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling
kms_plane_alpha_blend@pipe-A-alpha-basic
kms_plane_alpha_blend@pipe-A-alpha-opaque-fb
kms_plane_alpha_blend@pipe-A-alpha-transparent-fb
kms_plane_alpha_blend@pipe-B-alpha-basic
kms_plane_alpha_blend@pipe-B-alpha-transparent-fb
kms_plane_alpha_blend@pipe-B-constant-alpha-max
kms_plane_alpha_blend@pipe-C-alpha-basic
kms_plane_alpha_blend@pipe-C-alpha-opaque-fb
kms_plane_alpha_blend@pipe-C-alpha-transparent-fb
kms_sysfs_edid_timing

View File

@ -8,7 +8,6 @@ kms_bw@linear-tiling-4-displays-3840x2160p,Fail
kms_bw@linear-tiling-5-displays-1920x1080p,Fail
kms_bw@linear-tiling-5-displays-2560x1440p,Fail
kms_bw@linear-tiling-5-displays-3840x2160p,Fail
kms_color@ctm-0-25,Fail
kms_flip@flip-vs-panning-vs-hang,Timeout
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail

View File

@ -1,5 +0,0 @@
kms_draw_crc@.*
kms_flip@blocking-absolute-wf_vblank
kms_flip@bo-too-big-interruptible
kms_flip@busy-flip
kms_flip@flip-vs-rmfb-interruptible

View File

@ -1 +0,0 @@
kms_flip@flip-vs-suspend

View File

@ -6,5 +6,8 @@ kms_bw@linear-tiling-2-displays-3840x2160p,Fail
kms_bw@linear-tiling-3-displays-2560x1440p,Fail
kms_bw@linear-tiling-3-displays-3840x2160p,Fail
kms_color@pipe-A-invalid-gamma-lut-sizes,Fail
kms_plane_cursor@overlay,Fail
kms_plane_cursor@primary,Fail
kms_plane_cursor@viewport,Fail
kms_plane_scaling@upscale-with-rotation-20x20,Fail
kms_rmfb@close-fd,Fail
kms_rmfb@close-fd,Fail

View File

@ -1,14 +0,0 @@
core_setmaster_vs_auth
kms_bw@linear-tiling-1-displays-1920x1080p
kms_bw@linear-tiling-1-displays-3840x2160p
kms_bw@linear-tiling-3-displays-1920x1080p
kms_cursor_legacy@cursor-vs-flip-atomic
kms_plane_scaling@invalid-num-scalers
kms_plane_scaling@planes-upscale-20x20
kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-5
kms_plane_scaling@upscale-with-modifier-20x20
kms_plane_scaling@upscale-with-pixel-format-20x20
kms_prop_blob@invalid-set-prop-any
kms_properties@get_properties-sanity-atomic
kms_properties@plane-properties-atomic
kms_properties@plane-properties-legacy

View File

@ -1,12 +1,16 @@
kms_3d,Fail
kms_properties@connector-properties-atomic,Fail
kms_properties@get_properties-sanity-atomic,Fail
kms_properties@get_properties-sanity-non-atomic,Fail
kms_properties@connector-properties-legacy,Fail
kms_cursor_legacy@forked-bo,Fail
kms_cursor_legacy@forked-move,Fail
kms_cursor_legacy@single-bo,Fail
kms_cursor_legacy@single-move,Fail
kms_cursor_legacy@torture-bo,Fail
kms_cursor_legacy@torture-move,Fail
kms_hdmi_inject@inject-4k,Fail
kms_force_connector_basic@force-edid,Fail
kms_hdmi_inject@inject-4k,Fail
kms_plane_cursor@overlay,Fail
kms_plane_cursor@primary,Fail
kms_plane_cursor@viewport,Fail
kms_properties@connector-properties-atomic,Fail
kms_properties@connector-properties-legacy,Fail
kms_properties@get_properties-sanity-atomic,Fail
kms_properties@get_properties-sanity-non-atomic,Fail

View File

@ -1,4 +0,0 @@
kms_force_connector_basic@force-connector-state
kms_force_connector_basic@force-edid
kms_force_connector_basic@force-load-detect
kms_force_connector_basic@prune-stale-modes

View File

@ -1,4 +0,0 @@
kms_force_connector_basic@force-connector-state
kms_force_connector_basic@force-edid
kms_force_connector_basic@force-load-detect
kms_force_connector_basic@prune-stale-modes

View File

@ -1,2 +1,4 @@
kms_3d,Fail
kms_addfb_basic@addfb25-bad-modifier,Fail
kms_force_connector_basic@force-edid,Fail
kms_hdmi_inject@inject-4k,Fail

View File

@ -1,4 +0,0 @@
kms_force_connector_basic@force-connector-state
kms_force_connector_basic@force-edid
kms_force_connector_basic@force-load-detect
kms_force_connector_basic@prune-stale-modes

View File

@ -1,12 +1,17 @@
kms_color@ctm-0-25,Fail
kms_color@ctm-0-50,Fail
kms_color@ctm-0-75,Fail
kms_color@ctm-blue-to-red,Fail
kms_color@ctm-green-to-red,Fail
kms_color@ctm-negative,Fail
kms_color@ctm-red-to-blue,Fail
kms_color@ctm-signed,Fail
kms_cursor_legacy@cursor-vs-flip-toggle,Fail
kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
kms_cursor_legacy@cursorA-vs-flipA-atomic-transitions,Crash
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
kms_plane@pixel-format,Fail
kms_plane@pixel-format-source-clamping,Fail
kms_plane@plane-position-covered,Fail
kms_plane@plane-position-hole,Fail
kms_plane@plane-position-hole-dpms,Fail
kms_plane_alpha_blend@alpha-7efc,Fail
kms_plane_alpha_blend@coverage-7efc,Fail
kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail

View File

@ -1,7 +1,17 @@
# Test ends up reading CRC from frame before cursor update
# sometimes.. tbd if this is a kernel CRC bug or a test
# bug
kms_cursor_crc@.*
kms_plane_multiple@atomic-pipe-A-tiling-none
kms_atomic_transition@modeset-transition-nonblocking-fencing,Fail
kms_color@ctm-0-25
kms_color@ctm-0-50
kms_color@ctm-0-75
kms_color@ctm-blue-to-red
kms_color@ctm-green-to-red
kms_color@ctm-negative
kms_color@ctm-red-to-blue
kms_color@ctm-signed
kms_flip@flip-vs-modeset-vs-hang
kms_flip@flip-vs-panning-vs-hang
kms_plane@pixel-format
kms_plane@pixel-format-source-clamping
kms_plane@plane-position-covered
kms_plane@plane-position-hole
kms_plane@plane-position-hole-dpms
kms_writeback@writeback-fb-id
kms_writeback@writeback-invalid-parameters

View File

@ -4,20 +4,4 @@
# Test incorrectly assumes that CTM support implies gamma/degamma
# LUT support. None of the subtests handle the case of only having
# CTM support
kms_color.*
# 4k@60 is not supported on this hw, but driver doesn't handle it
# too gracefully.. https://gitlab.freedesktop.org/drm/msm/-/issues/15
kms_bw@linear-tiling-.*-displays-3840x2160p
# Until igt fix lands: https://patchwork.freedesktop.org/patch/493175/
kms_bw@linear-tiling-2.*
kms_bw@linear-tiling-3.*
kms_bw@linear-tiling-4.*
kms_bw@linear-tiling-5.*
kms_bw@linear-tiling-6.*
# igt fix posted: https://patchwork.freedesktop.org/patch/499926/
# failure mode is flakey due to randomization but fails frequently
# enough to be detected as a Crash or occasionally UnexpectedPass.
kms_plane_multiple@atomic-pipe-A-tiling-none
#kms_color.*

View File

@ -15,19 +15,16 @@ kms_color@pipe-A-ctm-max,Fail
kms_color@pipe-A-ctm-negative,Fail
kms_color@pipe-A-ctm-red-to-blue,Fail
kms_color@pipe-A-legacy-gamma,Fail
kms_cursor_legacy@basic-flip-after-cursor-atomic,Fail
kms_cursor_legacy@basic-flip-after-cursor-legacy,Fail
kms_cursor_legacy@basic-flip-after-cursor-varying-size,Fail
kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
kms_cursor_legacy@basic-flip-before-cursor-varying-size,Fail
kms_cursor_legacy@cursor-vs-flip-atomic,Fail
kms_cursor_legacy@cursor-vs-flip-atomic-transitions,Fail
kms_cursor_legacy@cursor-vs-flip-atomic-transitions-varying-size,Fail
kms_cursor_legacy@cursor-vs-flip-legacy,Fail
kms_cursor_legacy@cursor-vs-flip-toggle,Fail
kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
kms_cursor_legacy@cursorA-vs-flipA-toggle,Fail
kms_cursor_legacy@flip-vs-cursor-atomic,Fail
kms_cursor_legacy@flip-vs-cursor-crc-atomic,Fail
kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail
@ -35,11 +32,9 @@ kms_cursor_legacy@flip-vs-cursor-legacy,Fail
kms_cursor_legacy@short-flip-after-cursor-atomic-transitions,Fail
kms_cursor_legacy@short-flip-after-cursor-atomic-transitions-varying-size,Fail
kms_cursor_legacy@short-flip-after-cursor-toggle,Fail
kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Fail
kms_cursor_legacy@short-flip-before-cursor-atomic-transitions-varying-size,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
kms_plane@pixel-format,Fail
kms_plane@pixel-format-source-clamping,Fail
kms_plane_alpha_blend@alpha-7efc,Fail
kms_plane_alpha_blend@coverage-7efc,Fail
kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail

View File

@ -1,11 +1,12 @@
# Test ends up reading CRC from frame before cursor update
# sometimes.. tbd if this is a kernel CRC bug or a test
# bug
kms_cursor_crc@.*
kms_cursor_legacy@basic-flip-after-cursor-atomic
kms_cursor_legacy@basic-flip-before-cursor-varying-size
kms_cursor_legacy@cursorA-vs-flipA-toggle
kms_cursor_legacy@flip-vs-cursor-atomic-transitions
kms_cursor_legacy@flip-vs-cursor-toggle
kms_cursor_legacy@pipe-A-forked-bo
kms_cursor_legacy@pipe-A-forked-move
kms_cursor_legacy@flip-vs-cursor-varying-size
kms_cursor_legacy@short-flip-before-cursor-atomic-transitions
kms_cursor_legacy@short-flip-before-cursor-toggle
kms_flip@dpms-vs-vblank-race-interruptible
kms_flip@flip-vs-modeset-vs-hang
kms_flip@flip-vs-panning-vs-hang
kms_plane@pixel-format
kms_plane@pixel-format-source-clamping

View File

@ -0,0 +1,17 @@
git+https://gitlab.freedesktop.org/gfx-ci/ci-collate@09e7142715c16f54344ddf97013331ba063b162b
termcolor==2.3.0
# ci-collate dependencies
certifi==2023.7.22
charset-normalizer==3.2.0
idna==3.4
pip==23.2.1
python-gitlab==3.15.0
requests==2.31.0
requests-toolbelt==1.0.0
ruamel.yaml==0.17.32
ruamel.yaml.clib==0.2.7
setuptools==68.0.0
tenacity==8.2.3
urllib3==2.0.4
wheel==0.41.1

Some files were not shown because too many files have changed in this diff.