Merge tag 'drm-next-2023-04-24' of git://anongit.freedesktop.org/drm/drm

Pull drm updates from Dave Airlie:
 "There is a new Qualcomm accel driver for their QAIC, dma-fence got a
  deadline feature added, lots of refactoring around fbdev emulation,
  and the usual pre-release hw enablements from AMD and Intel and fixes
  everywhere.

  New drivers:
   - add QAIC acceleration driver

  dma-buf:
   - constify kobj_type structs
   - Reject prime DMA-Buf attachment if get_sg_table is missing.

  fbdev:
   - cmdline parser fixes
   - implement fbdev emulation for GEM DMA drivers
   - always use shadow buffer in fbdev emulation helpers

  dma-fence:
   - add deadline hint to fences
   - signal private stub fence

  core:
   - improve DisplayID 2.0 and EDID parsing
   - add gem eviction function + callback
   - prep to convert shmem helper to GEM resv lock
   - move suballocator from radeon/amdgpu to core for Xe
   - HPD polling fixes
   - Documentation improvements
   - Add atomic enable_plane callback
   - use tgid instead of pid for client tracking
   - DP: Add SDP Error Detection Configuration Register
   - Add prime import/export to vram-helper
   - use pci aperture helpers in more drivers

  panel:
   - Radxa 8/10HD support
   - Samsung AMD495QA01 support
   - Elida KD50T048A
   - Sony TD4353
   - Novatek NT36523
   - STARRY 2081101QFH032011-53G
   - B133UAN01.0
   - AUO NE135FBM-N41

  i915:
   - More MTL enabling
   - fix s/r problems with MEI/PXP
   - Implement fb_dirty for PSR, FBC, DRRS fixes
   - Fix eDP+DSI dual panel systems
   - Fix issue #6333: "list_add corruption" and full system lockup from
     performance monitoring
   - Don't use stolen memory or BAR for ring buffers on LLC platforms
   - Make sure DSM size has correct 1MiB granularity on Gen12+
   - Whitelist COMMON_SLICE_CHICKEN3 for UMD access on Gen12+
   - Add engine TLB invalidation for Meteorlake
   - Fix GSC races on driver load/unload on Meteorlake+
   - Make kobj_type structures constant
   - Move fd_install after last use of fence
   - wm/vblank refactoring
   - display code refactoring
   - Create GSC submission targeting HDCP and PXP usages on MTL+
   - Enable HDCP2.x via GSC CS
   - Fix context runtime accounting on sysfs fdinfo for heavy workloads
   - Use i915 instead of dev_priv inside the file_priv structure
   - Replace fake flex-array with flexible-array member

  amdgpu:
   - Make kobj structures const
   - Generalize dmabuf import to work with KFD
   - Add capped/uncapped workload handling for supported APUs
   - Expose additional memory stats via fdinfo
   - Register vga_switcheroo for apple-gmux
   - Initial NBIO7.9, GC 9.4.3, GFXHUB 1.2, MMHUB 1.8 support
   - Initial DC FAM infrastructure
   - Link DC backlight to connector device rather than PCI device
   - Add sysfs nodes for secondary VCN clocks

  amdkfd:
   - Make kobj structures const
   - Support for exporting buffers via dmabuf
   - Multi-VMA page migration fixes
   - initial GC 9.4.3 support

  radeon:
   - iMac fix
   - convert to client based fbdev emulation

  habanalabs:
   - Add opcodes to the CS ioctl to allow the user to stall/resume specific
     engines inside Gaudi2.
   - INFO ioctl: report the amount of device memory that the driver and f/w
     reserve for themselves.
   - INFO ioctl: report a bit-mask of the available rotator engines
   - INFO ioctl: report the address of the f/w register that should be used
     to trigger interrupts
   - INFO ioctl: two new opcodes to fetch information on h/w and f/w
     events
   - Enable graceful reset mechanism for compute-reset.
   - Align to the latest firmware specs.
   - Enforce the release order of the compute device and dma-buf.

  msm:
   - UBWC decoder programming rework
   - SM8550, SM8450 bindings update
   - uapi C++ fix
   - a3xx and a4xx devfreq support
   - GPU and GEM updates to avoid allocations which could trigger
     reclaim (shrinker) in fence signaling path
   - dma-fence deadline hint support and wait-boost
   - a640/650 speed bin support

  cirrus:
   - convert to regular atomic helpers
   - add damage clipping

  mediatek:
   - 10-bit overlay support
   - mt8195 support
   - Only trigger DRM HPD events if bridge is attached
   - Change the AUX retry count when receiving AUX_DEFER

  rockchip:
   - add 4K support

  vc4:
   - use drm_gem_objects

  virtio:
   - allow KMS support to be disabled
   - add damage clipping

  vmwgfx:
   - buffer object lifetime fixes

  exynos:
   - move MIPI DSI driver to drm bridge for iMX sharing
   - use kernel fbdev emulation

  panfrost:
   - add support for mali MT81xx devices
   - add speed binning support

  lima:
   - add usage stats

  tegra:
   - fbdev client conversion

  vkms:
   - Add primary plane positioning support"

* tag 'drm-next-2023-04-24' of git://anongit.freedesktop.org/drm/drm: (1495 commits)
  drm/i915/dp_mst: Fix active port PLL selection for secondary MST streams
  drm/exynos: Implement fbdev emulation as in-kernel client
  drm/exynos: Initialize fbdev DRM client
  drm/exynos: Remove fb_helper from struct exynos_drm_private
  drm/exynos: Remove struct exynos_drm_fbdev
  drm/exynos: Remove exynos_gem from struct exynos_drm_fbdev
  drm/i915: Fix memory leaks in i915 selftests
  drm/i915: Make intel_get_crtc_new_encoder() less oopsy
  drm/i915/gt: Avoid out-of-bounds access when loading HuC
  drm/amdgpu: add some basic elements for multiple XCD case
  drm/amdgpu: move vmhub out of amdgpu_ring_funcs (v4)
  Revert "drm/amdgpu: enable ras for mp0 v13_0_10 on SRIOV"
  drm/amdgpu: add common ip block for GC 9.4.3
  drm/amd/display: Add logging when DP link training Clock recovery is Successful
  drm/amdgpu: add common early init support for GC 9.4.3
  drm/amdgpu: switch to v9_4_3 gfx_funcs callbacks for GC 9.4.3
  drm/amd/display: Add logging when setting DP sink power state fails
  drm/amdkfd: Add gfx_target_version for GC 9.4.3
  drm/amdkfd: Enable HW_UPDATE_RPTR on GC 9.4.3
  drm/amdgpu: reserve the old gc_11_0_*_mes.bin
  ...
commit c8cc58e289 (Linus Torvalds, 2023-04-25 16:12:15 -07:00)
1192 changed files with 183422 additions and 30711 deletions

@@ -14,7 +14,9 @@ Description: RW. Card reactive sustained (PL1/Tau) power limit in microwatts.
The power controller will throttle the operating frequency
if the power averaged over a window (typically seconds)
exceeds this limit.
exceeds this limit. A read value of 0 means that the PL1
power limit is disabled, writing 0 disables the
limit. Writing values > 0 will enable the power limit.
Only supported for particular Intel i915 graphics platforms.

@@ -8,6 +8,7 @@ Compute Accelerators
:maxdepth: 1
introduction
qaic/index
.. only:: subproject and html

@@ -0,0 +1,510 @@
.. SPDX-License-Identifier: GPL-2.0-only
===============================
Qualcomm Cloud AI 100 (AIC100)
===============================
Overview
========
The Qualcomm Cloud AI 100/AIC100 family of products (including SA9000P - part of
Snapdragon Ride) are PCIe adapter cards which contain a dedicated SoC ASIC for
the purpose of efficiently running Artificial Intelligence (AI) Deep Learning
inference workloads. They are AI accelerators.
The PCIe interface of AIC100 is capable of PCIe Gen4 speeds over eight lanes
(x8). An individual SoC on a card can have up to 16 NSPs for running workloads.
Each SoC has an A53 management CPU. On card, there can be up to 32 GB of DDR.
Multiple AIC100 cards can be hosted in a single system to scale overall
performance. AIC100 cards are multi-user capable and able to execute workloads
from multiple users in a concurrent manner.
Hardware Description
====================
An AIC100 card consists of an AIC100 SoC, on-card DDR, and a set of misc
peripherals (PMICs, etc).
An AIC100 card can either be a PCIe HHHL form factor (a traditional PCIe card),
or a Dual M.2 card. Both use PCIe to connect to the host system.
As a PCIe endpoint/adapter, AIC100 uses the standard VendorID(VID)/
DeviceID(DID) combination to uniquely identify itself to the host. AIC100
uses the standard Qualcomm VID (0x17cb). All AIC100 SKUs use the same
AIC100 DID (0xa100).
AIC100 does not implement FLR (function level reset).
AIC100 implements MSI but does not implement MSI-X. AIC100 requires 17 MSIs to
operate (1 for MHI, 16 for the DMA Bridge).
As a PCIe device, AIC100 utilizes BARs to provide host interfaces to the device
hardware. AIC100 provides 3, 64-bit BARs.
* The first BAR is 4K in size, and exposes the MHI interface to the host.
* The second BAR is 2M in size, and exposes the DMA Bridge interface to the
host.
* The third BAR is variable in size based on an individual AIC100's
configuration, but defaults to 64K. This BAR currently has no purpose.
From the host perspective, AIC100 has several key hardware components -
* MHI (Modem Host Interface)
* QSM (QAIC Service Manager)
* NSPs (Neural Signal Processor)
* DMA Bridge
* DDR
MHI
---
AIC100 has one MHI interface over PCIe. MHI itself is documented at
Documentation/mhi/index.rst. MHI is the mechanism the host uses to communicate
with the QSM. Except for workload data via the DMA Bridge, all interaction with
the device occurs via MHI.
QSM
---
QAIC Service Manager. This is an ARM A53 CPU that runs the primary
firmware of the card and performs on-card management tasks. It also
communicates with the host via MHI. Each AIC100 has one of
these.
NSP
---
Neural Signal Processor. Each AIC100 has up to 16 of these. These are
the processors that run the workloads on AIC100. Each NSP is a Qualcomm Hexagon
(Q6) DSP with HVX and HMX. Each NSP can only run one workload at a time, but
multiple NSPs may be assigned to a single workload. Since each NSP can only run
one workload, AIC100 is limited to 16 concurrent workloads. Workload
"scheduling" is under the purview of the host. AIC100 does not automatically
timeslice.
DMA Bridge
----------
The DMA Bridge is a custom DMA engine that manages the flow of data
in and out of workloads. AIC100 has one of these. The DMA Bridge has 16
channels, each consisting of a set of request/response FIFOs. Each active
workload is assigned a single DMA Bridge channel. The DMA Bridge exposes
hardware registers to manage the FIFOs (head/tail pointers), but requires host
memory to store the FIFOs.
DDR
---
AIC100 has on-card DDR. In total, an AIC100 can have up to 32 GB of DDR.
This DDR is used to store workloads, data for the workloads, and is used by the
QSM for managing the device. NSPs are granted access to sections of the DDR by
the QSM. The host does not have direct access to the DDR, and must make
requests to the QSM to transfer data to the DDR.
High-level Use Flow
===================
AIC100 is a multi-user, programmable accelerator typically used for running
neural networks in inferencing mode to efficiently perform AI operations.
AIC100 is not intended for training neural networks. AIC100 can be utilized
for generic compute workloads.
Assuming a user wants to utilize AIC100, they would follow these steps:
1. Compile the workload into an ELF targeting the NSP(s)
2. Make requests to the QSM to load the workload and related artifacts into the
device DDR
3. Make a request to the QSM to activate the workload onto a set of idle NSPs
4. Make requests to the DMA Bridge to send input data to the workload to be
processed, and other requests to receive processed output data from the
workload.
5. Once the workload is no longer required, make a request to the QSM to
deactivate the workload, thus putting the NSPs back into an idle state.
6. Once the workload and related artifacts are no longer needed for future
sessions, make requests to the QSM to unload the data from DDR. This frees
the DDR to be used by other users.
Boot Flow
=========
AIC100 uses a flashless boot flow, derived from Qualcomm MSMs.
When AIC100 is first powered on, it begins executing PBL (Primary Bootloader)
from ROM. PBL enumerates the PCIe link, and initializes the BHI (Boot Host
Interface) component of MHI.
Using BHI, the host points PBL to the location of the SBL (Secondary Bootloader)
image. The PBL pulls the image from the host, validates it, and begins
execution of SBL.
SBL initializes MHI, and uses MHI to notify the host that the device has entered
the SBL stage. SBL performs a number of operations:
* SBL initializes the majority of hardware (anything PBL left uninitialized),
including DDR.
* SBL offloads the bootlog to the host.
* SBL synchronizes timestamps with the host for future logging.
* SBL uses the Sahara protocol to obtain the runtime firmware images from the
host.
Once SBL has obtained and validated the runtime firmware, it brings the NSPs out
of reset, and jumps into the QSM.
The QSM uses MHI to notify the host that the device has entered the QSM stage
(AMSS in MHI terms). At this point, the AIC100 device is fully functional, and
ready to process workloads.
Userspace components
====================
Compiler
--------
An open compiler for AIC100 based on upstream LLVM can be found at:
https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100-cc
Usermode Driver (UMD)
---------------------
An open UMD that interfaces with the qaic kernel driver can be found at:
https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100
Sahara loader
-------------
An open implementation of the Sahara protocol called kickstart can be found at:
https://github.com/andersson/qdl
MHI Channels
============
AIC100 defines a number of MHI channels for different purposes. This is a list
of the defined channels, and their uses.
+----------------+---------+----------+----------------------------------------+
| Channel name | IDs | EEs | Purpose |
+================+=========+==========+========================================+
| QAIC_LOOPBACK | 0 & 1 | AMSS | Any data sent to the device on this |
| | | | channel is sent back to the host. |
+----------------+---------+----------+----------------------------------------+
| QAIC_SAHARA | 2 & 3 | SBL | Used by SBL to obtain the runtime |
| | | | firmware from the host. |
+----------------+---------+----------+----------------------------------------+
| QAIC_DIAG | 4 & 5 | AMSS | Used to communicate with QSM via the |
| | | | DIAG protocol. |
+----------------+---------+----------+----------------------------------------+
| QAIC_SSR | 6 & 7 | AMSS | Used to notify the host of subsystem |
| | | | restart events, and to offload SSR |
| | | | crashdumps. |
+----------------+---------+----------+----------------------------------------+
| QAIC_QDSS | 8 & 9 | AMSS | Used for the Qualcomm Debug Subsystem. |
+----------------+---------+----------+----------------------------------------+
| QAIC_CONTROL | 10 & 11 | AMSS | Used for the Neural Network Control |
| | | | (NNC) protocol. This is the primary |
| | | | channel between host and QSM for |
| | | | managing workloads. |
+----------------+---------+----------+----------------------------------------+
| QAIC_LOGGING | 12 & 13 | SBL | Used by the SBL to send the bootlog to |
| | | | the host. |
+----------------+---------+----------+----------------------------------------+
| QAIC_STATUS | 14 & 15 | AMSS | Used to notify the host of Reliability,|
| | | | Accessibility, Serviceability (RAS) |
| | | | events. |
+----------------+---------+----------+----------------------------------------+
| QAIC_TELEMETRY | 16 & 17 | AMSS | Used to get/set power/thermal/etc |
| | | | attributes. |
+----------------+---------+----------+----------------------------------------+
| QAIC_DEBUG | 18 & 19 | AMSS | Not used. |
+----------------+---------+----------+----------------------------------------+
| QAIC_TIMESYNC | 20 & 21 | SBL/AMSS | Used to synchronize timestamps in the |
| | | | device side logs with the host time |
| | | | source. |
+----------------+---------+----------+----------------------------------------+
DMA Bridge
==========
Overview
--------
The DMA Bridge is one of the main interfaces to the host from the device
(the other being MHI). As part of activating a workload to run on NSPs, the QSM
assigns that network a DMA Bridge channel. A workload's DMA Bridge channel
(DBC for short) is solely for the use of that workload and is not shared with
other workloads.
Each DBC is a pair of FIFOs that manage data in and out of the workload. One
FIFO is the request FIFO. The other FIFO is the response FIFO.
Each DBC contains 4 registers in hardware:
* Request FIFO head pointer (offset 0x0). Read only by the host. Indicates the
latest item in the FIFO the device has consumed.
* Request FIFO tail pointer (offset 0x4). Read/write by the host. Host
increments this register to add new items to the FIFO.
* Response FIFO head pointer (offset 0x8). Read/write by the host. Indicates
the latest item in the FIFO the host has consumed.
* Response FIFO tail pointer (offset 0xc). Read only by the host. Device
increments this register to add new items to the FIFO.
The values in each register are indexes in the FIFO. To get the location of the
FIFO element pointed to by the register: FIFO base address + register * element
size.
DBC registers are exposed to the host via the second BAR. Each DBC consumes
4KB of space in the BAR.
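
As a rough illustration of the layout above (not code from the driver; the
constant and helper names are made up for this sketch), the MMIO offset of a
DBC register within the second BAR and the host-memory address of a FIFO
element could be computed as:

.. code-block:: c

   #include <stddef.h>
   #include <stdint.h>

   /* Illustrative values only, following the description above. */
   #define DBC_STRIDE       0x1000u /* each DBC consumes 4KB of BAR 2 */
   #define REQ_HEAD_OFFSET  0x0u    /* request FIFO head, read-only for the host */
   #define REQ_TAIL_OFFSET  0x4u    /* request FIFO tail, host writes to queue */
   #define RSP_HEAD_OFFSET  0x8u    /* response FIFO head, host marks consumed */
   #define RSP_TAIL_OFFSET  0xcu    /* response FIFO tail, read-only for the host */

   /* Offset of one of the four registers of a given DBC inside BAR 2. */
   static inline uint64_t dbc_reg_offset(unsigned int dbc_id, uint32_t reg)
   {
           return (uint64_t)dbc_id * DBC_STRIDE + reg;
   }

   /* FIFO element address: FIFO base address + register value * element size. */
   static inline void *fifo_elem(void *fifo_base, uint32_t index, size_t elem_size)
   {
           return (char *)fifo_base + (size_t)index * elem_size;
   }
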
The actual FIFOs are backed by host memory. When sending a request to the QSM
to activate a network, the host must donate memory to be used for the FIFOs.
Due to internal mapping limitations of the device, a single contiguous chunk of
memory must be provided per DBC, which hosts both FIFOs. The request FIFO will
consume the beginning of the memory chunk, and the response FIFO will consume
the end of the memory chunk.
Request FIFO
------------
A request FIFO element has the following structure:
.. code-block:: c

   struct request_elem {
           u16 req_id;
           u8  seq_id;
           u8  pcie_dma_cmd;
           u32 reserved;
           u64 pcie_dma_source_addr;
           u64 pcie_dma_dest_addr;
           u32 pcie_dma_len;
           u32 reserved;
           u64 doorbell_addr;
           u8  doorbell_attr;
           u8  reserved;
           u16 reserved;
           u32 doorbell_data;
           u32 sem_cmd0;
           u32 sem_cmd1;
           u32 sem_cmd2;
           u32 sem_cmd3;
   };

Request field descriptions:
req_id
request ID. A request FIFO element and a response FIFO element with
the same request ID refer to the same command.
seq_id
sequence ID within a request. Ignored by the DMA Bridge.
pcie_dma_cmd
describes the DMA element of this request.
* Bit(7) is the force MSI flag, which overrides the DMA Bridge MSI logic
and generates an MSI when this request is complete, if QSM has configured
the DMA Bridge to look at this bit.
* Bits(6:5) are reserved.
* Bit(4) is the completion code flag, and indicates that the DMA Bridge
shall generate a response FIFO element when this request is
complete.
* Bit(3) indicates if this request is a linked list transfer(0) or a bulk
transfer(1).
* Bit(2) is reserved.
* Bits(1:0) indicate the type of transfer. No transfer(0), to device(1),
from device(2). Value 3 is illegal.
pcie_dma_source_addr
source address for a bulk transfer, or the address of the linked list.
pcie_dma_dest_addr
destination address for a bulk transfer.
pcie_dma_len
length of the bulk transfer. Note that the size of this field
limits transfers to 4G in size.
doorbell_addr
address of the doorbell to ring when this request is complete.
doorbell_attr
doorbell attributes.
* Bit(7) indicates if a write to a doorbell is to occur.
* Bits(6:2) are reserved.
* Bits(1:0) contain the encoding of the doorbell length. 0 is 32-bit,
1 is 16-bit, 2 is 8-bit, 3 is reserved. The doorbell address
must be naturally aligned to the specified length.
doorbell_data
data to write to the doorbell. Only the bits corresponding to
the doorbell length are valid.
sem_cmdN
semaphore command.
* Bit(31) indicates this semaphore command is enabled.
* Bit(30) is the to-device DMA fence. Block this request until all
to-device DMA transfers are complete.
* Bit(29) is the from-device DMA fence. Block this request until all
from-device DMA transfers are complete.
* Bits(28:27) are reserved.
* Bits(26:24) are the semaphore command. 0 is NOP. 1 is init with the
specified value. 2 is increment. 3 is decrement. 4 is wait
until the semaphore is equal to the specified value. 5 is wait
until the semaphore is greater or equal to the specified value.
6 is "P", wait until semaphore is greater than 0, then
decrement by 1. 7 is reserved.
* Bit(23) is reserved.
* Bit(22) is the semaphore sync. 0 is post sync, which means that the
semaphore operation is done after the DMA transfer. 1 is
presync, which gates the DMA transfer. Only one presync is
allowed per request.
* Bit(21) is reserved.
* Bits(20:16) is the index of the semaphore to operate on.
* Bits(15:12) are reserved.
* Bits(11:0) are the semaphore value to use in operations.
Overall, a request is processed in 4 steps:
1. If specified, the presync semaphore condition must be true
2. If enabled, the DMA transfer occurs
3. If specified, the postsync semaphore conditions must be true
4. If enabled, the doorbell is written
By using the semaphores in conjunction with the workload running on the NSPs,
the data pipeline can be synchronized such that the host can queue multiple
requests of data for the workload to process, but the DMA Bridge will only copy
the data into the memory of the workload when the workload is ready to process
the next input.
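
To make the encodings above concrete, here is a hedged, kernel-style sketch
(not taken from the driver) that fills in a request element for a bulk
host-to-device transfer which asks for a response element, rings a 32-bit
doorbell, and gates the copy on a presync semaphore. It assumes the
struct request_elem layout shown earlier and kernel u8/u16/u32/u64 types:

.. code-block:: c

   /* Illustrative only: the bit values are derived from the field
    * descriptions above, not copied from the qaic driver. */
   static void fill_bulk_to_device_req(struct request_elem *req, u16 req_id,
                                       u64 src, u64 dst, u32 len,
                                       u64 doorbell_addr, u32 doorbell_val)
   {
           memset(req, 0, sizeof(*req));

           req->req_id = req_id;

           /* Bit(4): generate a response FIFO element when complete.
            * Bit(3): bulk transfer rather than a linked list.
            * Bits(1:0) = 1: transfer to the device. */
           req->pcie_dma_cmd = (1 << 4) | (1 << 3) | 1;
           req->pcie_dma_source_addr = src;
           req->pcie_dma_dest_addr = dst;
           req->pcie_dma_len = len;

           /* Bit(7): write the doorbell; Bits(1:0) = 0: 32-bit doorbell. */
           req->doorbell_addr = doorbell_addr;
           req->doorbell_attr = 1 << 7;
           req->doorbell_data = doorbell_val;

           /* sem_cmd0: Bit(31) enable, Bits(26:24) = 4 (wait until equal),
            * Bit(22) presync (gates the DMA), Bits(20:16) = semaphore 0,
            * Bits(11:0) = 1: wait for semaphore 0 to equal 1 before copying. */
           req->sem_cmd0 = (1u << 31) | (4u << 24) | (1u << 22) | 1u;
   }
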
Response FIFO
-------------
Once a request is fully processed, a response FIFO element is generated if
specified in pcie_dma_cmd. The structure of a response FIFO element:
.. code-block:: c

   struct response_elem {
           u16 req_id;
           u16 completion_code;
   };

req_id
matches the req_id of the request that generated this element.
completion_code
status of this request. 0 is success. Non-zero is an error.
The DMA Bridge will generate an MSI to the host as a reaction to activity in the
response FIFO of a DBC. The DMA Bridge hardware has an IRQ storm mitigation
algorithm, where it will only generate an MSI when the response FIFO transitions
from empty to non-empty (unless force MSI is enabled and triggered). In
response to this MSI, the host is expected to drain the response FIFO, and must
take care to handle any race conditions between draining the FIFO, and the
device inserting elements into the FIFO.
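
A minimal kernel-style sketch of such a drain loop is shown below; the helper
names and FIFO geometry are placeholders rather than the driver's
implementation. It re-checks the device-owned tail pointer after publishing the
new head so that elements inserted during the drain are not missed:

.. code-block:: c

   /* Illustrative only; handle_response() is a placeholder and the
    * response_elem layout is the one documented above. */
   void handle_response(struct response_elem *rsp);

   static void drain_response_fifo(struct response_elem *fifo, u32 nelem,
                                   void __iomem *rsp_head_reg,
                                   void __iomem *rsp_tail_reg)
   {
           u32 head = readl(rsp_head_reg); /* last element the host consumed */
           u32 tail;

           /* Re-read the tail each pass: the device may append elements while
            * we drain, without raising another MSI until the FIFO transitions
            * from empty to non-empty again. */
           while ((tail = readl(rsp_tail_reg)) != head) {
                   while (head != tail) {
                           handle_response(&fifo[head]);
                           head = (head + 1) % nelem;
                   }
                   writel(head, rsp_head_reg); /* report what was consumed */
           }
   }
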
Neural Network Control (NNC) Protocol
=====================================
The NNC protocol is how the host makes requests to the QSM to manage workloads.
It uses the QAIC_CONTROL MHI channel.
Each NNC request is packaged into a message. Each message is a series of
transactions. A passthrough type transaction can contain elements known as
commands.
QSM requires NNC messages be little endian encoded and the fields be naturally
aligned. Since there are 64-bit elements in some NNC messages, 64-bit alignment
must be maintained.
A message contains a header and then a series of transactions. A message may be
at most 4K in size from QSM to the host. From the host to the QSM, a message
can be at most 64K (maximum size of a single MHI packet), but there is a
continuation feature where message N+1 can be marked as a continuation of
message N. This is used for exceedingly large DMA xfer transactions.
Transaction descriptions
------------------------
passthrough
Allows userspace to send an opaque payload directly to the QSM.
This is used for NNC commands. Userspace is responsible for managing
the QSM message requirements in the payload.
dma_xfer
DMA transfer. Describes an object that the QSM should DMA into the
device via address and size tuples.
activate
Activate a workload onto NSPs. The host must provide memory to be
used by the DBC.
deactivate
Deactivate an active workload and return the NSPs to idle.
status
Query the QSM about its NNC implementation. Returns the NNC version,
and whether CRCs are used.
terminate
Release a user's resources.
dma_xfer_cont
Continuation of a previous DMA transfer. If a DMA transfer
cannot be specified in a single message (highly fragmented), this
transaction can be used to specify more ranges.
validate_partition
Query to QSM to determine if a partition identifier is valid.
Each message is tagged with a user id, and a partition id. The user id allows
QSM to track resources, and release them when the user goes away (e.g. the process
crashes). A partition id identifies the resource partition that QSM manages,
which this message applies to.
Messages may have CRCs. Messages should have CRCs applied until the QSM
reports via the status transaction that CRCs are not needed. The QSM on the
SA9000P requires CRCs for black channel safing.
Subsystem Restart (SSR)
=======================
SSR is the concept of limiting the impact of an error. An AIC100 device may
have multiple users, each with their own workload running. If the workload of
one user crashes, the fallout of that should be limited to that workload and not
impact other workloads. SSR accomplishes this.
If a particular workload crashes, QSM notifies the host via the QAIC_SSR MHI
channel. This notification identifies the workload by its assigned DBC. A
multi-stage recovery process is then used to clean up both sides, and get the
DBC/NSPs into a working state.
When SSR occurs, any state in the workload is lost. Any inputs that were in
process, or queued but not yet serviced, are lost. The loaded artifacts will
remain in on-card DDR, but the host will need to re-activate the workload if
it desires to recover the workload.
Reliability, Accessibility, Serviceability (RAS)
================================================
AIC100 is expected to be deployed in server systems where RAS ideology is
applied. Simply put, RAS is the concept of detecting, classifying, and
reporting errors. While PCIe has AER (Advanced Error Reporting) which factors
into RAS, AER does not allow for a device to report details about internal
errors. Therefore, AIC100 implements a custom RAS mechanism. When a RAS event
occurs, QSM will report the event with appropriate details via the QAIC_STATUS
MHI channel. A sysadmin may determine that a particular device needs
additional service based on RAS reports.
Telemetry
=========
QSM has the ability to report various physical attributes of the device, and in
some cases, to allow the host to control them. Examples include thermal limits,
thermal readings, and power readings. These items are communicated via the
QAIC_TELEMETRY MHI channel.

@@ -0,0 +1,13 @@
.. SPDX-License-Identifier: GPL-2.0-only
=====================================
accel/qaic Qualcomm Cloud AI driver
=====================================
The accel/qaic driver supports the Qualcomm Cloud AI machine learning
accelerator cards.
.. toctree::

   qaic
   aic100

@@ -0,0 +1,170 @@
.. SPDX-License-Identifier: GPL-2.0-only
=============
QAIC driver
=============
The QAIC driver is the Kernel Mode Driver (KMD) for the AIC100 family of AI
accelerator products.
Interrupts
==========
While the AIC100 DMA Bridge hardware implements an IRQ storm mitigation
mechanism, it is still possible for an IRQ storm to occur. A storm can happen
if the workload is particularly quick, and the host is responsive. If the host
can drain the response FIFO as quickly as the device can insert elements into
it, then the device will frequently transition the response FIFO from empty to
non-empty and generate MSIs at a rate equivalent to the speed of the
workload's ability to process inputs. The lprnet (license plate reader network)
workload is known to trigger this condition, and can generate in excess of 100k
MSIs per second. It has been observed that most systems cannot tolerate this
for long, and will crash due to some form of watchdog because of the overhead of
the interrupt controller interrupting the host CPU.
To mitigate this issue, the QAIC driver implements specific IRQ handling. When
QAIC receives an IRQ, it disables that line. This prevents the interrupt
controller from interrupting the CPU. Then QAIC drains the FIFO. Once the FIFO
is drained, QAIC implements a "last chance" polling algorithm where QAIC will
sleep for a time to see if the workload will generate more activity. The IRQ
line remains disabled during this time. If no activity is detected, QAIC exits
polling mode and reenables the IRQ line.
This mitigation in QAIC is very effective. The same lprnet usecase that
generates 100k IRQs per second (per /proc/interrupts) is reduced to roughly 64
IRQs over 5 minutes while keeping the host system stable, and having the same
workload throughput performance (within run to run noise variation).
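
A minimal sketch of this scheme, assuming a threaded interrupt handler, looks
roughly like the following. It is not the driver's actual code; dbc_drain()
and the polling budget are placeholders.

.. code-block:: c

   #include <linux/interrupt.h>
   #include <linux/delay.h>

   struct dbc;                          /* per-channel context (placeholder) */
   int dbc_drain(struct dbc *dbc);      /* returns number of responses handled */

   static irqreturn_t dbc_threaded_irq(int irq, void *data)
   {
           struct dbc *dbc = data;
           int quiet_polls = 0;

           disable_irq_nosync(irq);     /* keep the line from storming the CPU */

           while (quiet_polls < 10) {   /* "last chance" polling window */
                   if (dbc_drain(dbc) > 0)
                           quiet_polls = 0;   /* more work arrived: keep polling */
                   else
                           quiet_polls++;
                   usleep_range(100, 200);    /* cf. datapath_poll_interval_us */
           }

           enable_irq(irq);             /* back to interrupt-driven operation */
           return IRQ_HANDLED;
   }
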
Neural Network Control (NNC) Protocol
=====================================
The implementation of NNC is split between the KMD (QAIC) and UMD. In general
QAIC understands how to encode/decode NNC wire protocol, and elements of the
protocol which require kernel space knowledge to process (for example, mapping
host memory to device IOVAs). QAIC understands the structure of a message, and
all of the transactions. QAIC does not understand commands (the payload of a
passthrough transaction).
QAIC handles and enforces the required little endianness and 64-bit alignment,
to the degree that it can. Since QAIC does not know the contents of a
passthrough transaction, it relies on the UMD to satisfy the requirements.
The terminate transaction is of particular use to QAIC. QAIC is not aware of
the resources that are loaded onto a device since the majority of that activity
occurs within NNC commands. As a result, QAIC does not have the means to
roll back userspace activity. To ensure that a userspace client's resources
are fully released in the case of a process crash, or a bug, QAIC uses the
terminate command to let QSM know when a user has gone away, and the resources
can be released.
QSM can report a version number of the NNC protocol it supports. This is in the
form of a Major number and a Minor number.
Major number updates indicate changes to the NNC protocol which impact the
message format, or transactions (impacts QAIC).
Minor number updates indicate changes to the NNC protocol which impact the
commands (does not impact QAIC).
uAPI
====
QAIC defines a number of driver specific IOCTLs as part of the userspace API.
This section describes those APIs.
DRM_IOCTL_QAIC_MANAGE
This IOCTL allows userspace to send a NNC request to the QSM. The call will
block until a response is received, or the request has timed out.
DRM_IOCTL_QAIC_CREATE_BO
This IOCTL allows userspace to allocate a buffer object (BO) which can send
or receive data from a workload. The call will return a GEM handle that
represents the allocated buffer. The BO is not usable until it has been
sliced (see DRM_IOCTL_QAIC_ATTACH_SLICE_BO).
DRM_IOCTL_QAIC_MMAP_BO
This IOCTL allows userspace to prepare an allocated BO to be mmap'd into the
userspace process.
DRM_IOCTL_QAIC_ATTACH_SLICE_BO
This IOCTL allows userspace to slice a BO in preparation for sending the BO
to the device. Slicing is the operation of describing what portions of a BO
get sent where to a workload. This requires a set of DMA transfers for the
DMA Bridge, and as such, locks the BO to a specific DBC.
DRM_IOCTL_QAIC_EXECUTE_BO
This IOCTL allows userspace to submit a set of sliced BOs to the device. The
call is non-blocking. Success only indicates that the BOs have been queued
to the device, but does not guarantee they have been executed.
DRM_IOCTL_QAIC_PARTIAL_EXECUTE_BO
This IOCTL operates like DRM_IOCTL_QAIC_EXECUTE_BO, but it allows userspace
to shrink the BOs sent to the device for this specific call. If a BO
typically has N inputs, but only a subset of those is available, this IOCTL
allows userspace to indicate that only the first M bytes of the BO should be
sent to the device to minimize data transfer overhead. This IOCTL dynamically
recomputes the slicing, and therefore has some processing overhead before the
BOs can be queued to the device.
DRM_IOCTL_QAIC_WAIT_BO
This IOCTL allows userspace to determine when a particular BO has been
processed by the device. The call will block until either the BO has been
processed and can be re-queued to the device, or a timeout occurs.
DRM_IOCTL_QAIC_PERF_STATS_BO
This IOCTL allows userspace to collect performance statistics on the most
recent execution of a BO. This allows userspace to construct an end to end
timeline of the BO processing for a performance analysis.
DRM_IOCTL_QAIC_PART_DEV
This IOCTL allows userspace to request a duplicate "shadow device". This extra
accelN device is associated with a specific partition of resources on the
AIC100 device and can be used for limiting a process to some subset of
resources.
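
The IOCTLs are typically used in the order sketched below. Only the ordering is
shown: the actual request structures are defined in the QAIC uAPI header
exported by the kernel, the device node name assumes the first accel device,
and no error handling or payload setup is included.

.. code-block:: c

   /* Ordering sketch only -- not a working client. */
   #include <fcntl.h>
   #include <unistd.h>

   int run_workload_once(void)
   {
           int fd = open("/dev/accel/accel0", O_RDWR); /* QAIC accel node */

           if (fd < 0)
                   return -1;

           /* 1. DRM_IOCTL_QAIC_MANAGE: send NNC dma_xfer + activate
            *    transactions so QSM loads the workload onto idle NSPs.   */
           /* 2. DRM_IOCTL_QAIC_CREATE_BO: allocate input/output BOs.     */
           /* 3. DRM_IOCTL_QAIC_MMAP_BO + mmap(): map the BOs into this
            *    process.                                                 */
           /* 4. DRM_IOCTL_QAIC_ATTACH_SLICE_BO: bind each BO to the
            *    workload's DBC and describe how it is sliced.            */
           /* 5. DRM_IOCTL_QAIC_EXECUTE_BO: queue the sliced BOs
            *    (non-blocking).                                          */
           /* 6. DRM_IOCTL_QAIC_WAIT_BO: wait until a BO can be reused.   */
           /* 7. DRM_IOCTL_QAIC_MANAGE: deactivate and unload when done.  */

           close(fd);
           return 0;
   }
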
Userspace Client Isolation
==========================
AIC100 supports multiple clients. Multiple DBCs can be consumed by a single
client, and multiple clients can each consume one or more DBCs. Workloads
may contain sensitive information; therefore only the client that owns the
workload should be allowed to interface with the DBC.
Clients are identified by the instance associated with their open(). A client
may only use memory they allocate, and DBCs that are assigned to their
workloads. Attempts to access resources assigned to other clients will be
rejected.
Module parameters
=================
QAIC supports the following module parameters:
**datapath_polling (bool)**
Configures QAIC to use a polling thread for datapath events instead of relying
on the device interrupts. Useful for platforms with broken multiMSI. Must be
set at QAIC driver initialization. Default is 0 (off).
**mhi_timeout_ms (unsigned int)**
Sets the timeout value for MHI operations in milliseconds (ms). Must be set
at the time the driver detects a device. Default is 2000 (2 seconds).
**control_resp_timeout_s (unsigned int)**
Sets the timeout value for QSM responses to NNC messages in seconds (s). Must
be set at the time the driver is sending a request to QSM. Default is 60 (one
minute).
**wait_exec_default_timeout_ms (unsigned int)**
Sets the default timeout for the wait_exec ioctl in milliseconds (ms). Must be
set prior to the wait_exec ioctl call. A value specified in the ioctl call
overrides this for that call. Default is 5000 (5 seconds).
**datapath_poll_interval_us (unsigned int)**
Sets the polling interval in microseconds (us) when datapath polling is active.
Takes effect at the next polling interval. Default is 100 (100 us).

@@ -10,7 +10,7 @@ maintainers:
- Robin van der Gracht <robin@protonic.nl>
allOf:
- $ref: "/schemas/input/matrix-keymap.yaml#"
- $ref: /schemas/input/matrix-keymap.yaml#
properties:
compatible:

@@ -2,8 +2,8 @@
# Copyright 2019 BayLibre, SAS
%YAML 1.2
---
$id: "http://devicetree.org/schemas/display/amlogic,meson-dw-hdmi.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
$id: http://devicetree.org/schemas/display/amlogic,meson-dw-hdmi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Amlogic specific extensions to the Synopsys Designware HDMI Controller

@@ -2,8 +2,8 @@
# Copyright 2019 BayLibre, SAS
%YAML 1.2
---
$id: "http://devicetree.org/schemas/display/amlogic,meson-vpu.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
$id: http://devicetree.org/schemas/display/amlogic,meson-vpu.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Amlogic Meson Display Controller

@@ -2,8 +2,8 @@
# Copyright 2019 Analogix Semiconductor, Inc.
%YAML 1.2
---
$id: "http://devicetree.org/schemas/display/bridge/analogix,anx7625.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
$id: http://devicetree.org/schemas/display/bridge/analogix,anx7625.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Analogix ANX7625 SlimPort (4K Mobile HD Transmitter)

@@ -0,0 +1,63 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/bridge/analogix,dp.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Analogix Display Port bridge
maintainers:
- Rob Herring <robh@kernel.org>
properties:
reg:
maxItems: 1
interrupts:
maxItems: 1
clocks: true
clock-names: true
phys: true
phy-names:
const: dp
force-hpd:
description:
Indicate that the driver needs to force hpd when hpd detection fails; this
is used for some eDP screens which do not have an hpd signal.
hpd-gpios:
description:
Hotplug detect GPIO.
Indicates which GPIO should be used for hotplug detection
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description:
Input node to receive pixel data.
port@1:
$ref: /schemas/graph.yaml#/properties/port
description:
Port node with one endpoint connected to a dp-connector node.
required:
- port@0
- port@1
required:
- reg
- interrupts
- clock-names
- clocks
- ports
additionalProperties: true

@@ -1,51 +0,0 @@
Analogix Display Port bridge bindings
Required properties for dp-controller:
-compatible:
platform specific such as:
* "samsung,exynos5-dp"
* "rockchip,rk3288-dp"
* "rockchip,rk3399-edp"
-reg:
physical base address of the controller and length
of memory mapped region.
-interrupts:
interrupt combiner values.
-clocks:
from common clock binding: handle to dp clock.
-clock-names:
from common clock binding: Shall be "dp".
-phys:
from general PHY binding: the phandle for the PHY device.
-phy-names:
from general PHY binding: Should be "dp".
Optional properties for dp-controller:
-force-hpd:
Indicate driver need force hpd when hpd detect failed, this
is used for some eDP screen which don't have hpd signal.
-hpd-gpios:
Hotplug detect GPIO.
Indicates which GPIO should be used for hotplug detection
-port@[X]: SoC specific port nodes with endpoint definitions as defined
in Documentation/devicetree/bindings/media/video-interfaces.txt,
please refer to the SoC specific binding document:
* Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
* Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt
[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
-------------------------------------------------------------------------------
Example:
dp-controller {
compatible = "samsung,exynos5-dp";
reg = <0x145b0000 0x10000>;
interrupts = <10 3>;
interrupt-parent = <&combiner>;
clocks = <&clock 342>;
clock-names = "dp";
phys = <&dp_phy>;
phy-names = "dp";
};

@@ -1,8 +1,8 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: "http://devicetree.org/schemas/display/bridge/cdns,mhdp8546.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
$id: http://devicetree.org/schemas/display/bridge/cdns,mhdp8546.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Cadence MHDP8546 bridge

@@ -18,7 +18,7 @@ properties:
maxItems: 1
edid-emulation:
$ref: "/schemas/types.yaml#/definitions/uint32"
$ref: /schemas/types.yaml#/definitions/uint32
description:
The EDID emulation entry to use
Value Resolution Description

@@ -0,0 +1,255 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/bridge/samsung,mipi-dsim.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung MIPI DSIM bridge controller
maintainers:
- Inki Dae <inki.dae@samsung.com>
- Jagan Teki <jagan@amarulasolutions.com>
- Marek Szyprowski <m.szyprowski@samsung.com>
description: |
The Samsung MIPI DSIM bridge controller can be found on Exynos
and i.MX8M Mini/Nano/Plus SoCs.
properties:
compatible:
oneOf:
- enum:
- samsung,exynos3250-mipi-dsi
- samsung,exynos4210-mipi-dsi
- samsung,exynos5410-mipi-dsi
- samsung,exynos5422-mipi-dsi
- samsung,exynos5433-mipi-dsi
- fsl,imx8mm-mipi-dsim
- fsl,imx8mp-mipi-dsim
- items:
- const: fsl,imx8mn-mipi-dsim
- const: fsl,imx8mm-mipi-dsim
reg:
maxItems: 1
interrupts:
maxItems: 1
'#address-cells':
const: 1
'#size-cells':
const: 0
clocks:
minItems: 2
maxItems: 5
clock-names:
minItems: 2
maxItems: 5
samsung,phy-type:
$ref: /schemas/types.yaml#/definitions/uint32
description: phandle to the samsung phy-type
power-domains:
maxItems: 1
samsung,power-domain:
$ref: /schemas/types.yaml#/definitions/phandle
description: phandle to the associated samsung power domain
vddcore-supply:
description: MIPI DSIM Core voltage supply (e.g. 1.1V)
vddio-supply:
description: MIPI DSIM I/O and PLL voltage supply (e.g. 1.8V)
samsung,burst-clock-frequency:
$ref: /schemas/types.yaml#/definitions/uint32
description:
DSIM high speed burst mode frequency.
samsung,esc-clock-frequency:
$ref: /schemas/types.yaml#/definitions/uint32
description:
DSIM escape mode frequency.
samsung,pll-clock-frequency:
$ref: /schemas/types.yaml#/definitions/uint32
description:
DSIM oscillator clock frequency.
phys:
maxItems: 1
phy-names:
const: dsim
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description:
Input port node to receive pixel data from the
display controller. Exactly one endpoint must be
specified.
port@1:
$ref: /schemas/graph.yaml#/properties/port
description:
DSI output port node to the panel or the next bridge
in the chain.
required:
- clock-names
- clocks
- compatible
- interrupts
- reg
- samsung,burst-clock-frequency
- samsung,esc-clock-frequency
- samsung,pll-clock-frequency
allOf:
- $ref: ../dsi-controller.yaml#
- if:
properties:
compatible:
contains:
const: samsung,exynos5433-mipi-dsi
then:
properties:
clocks:
minItems: 5
clock-names:
items:
- const: bus_clk
- const: phyclk_mipidphy0_bitclkdiv8
- const: phyclk_mipidphy0_rxclkesc0
- const: sclk_rgb_vclk_to_dsim0
- const: sclk_mipi
ports:
required:
- port@0
required:
- ports
- vddcore-supply
- vddio-supply
- if:
properties:
compatible:
contains:
const: samsung,exynos5410-mipi-dsi
then:
properties:
clocks:
minItems: 2
clock-names:
items:
- const: bus_clk
- const: pll_clk
required:
- vddcore-supply
- vddio-supply
- if:
properties:
compatible:
contains:
const: samsung,exynos4210-mipi-dsi
then:
properties:
clocks:
minItems: 2
clock-names:
items:
- const: bus_clk
- const: sclk_mipi
required:
- vddcore-supply
- vddio-supply
- if:
properties:
compatible:
contains:
const: samsung,exynos3250-mipi-dsi
then:
properties:
clocks:
minItems: 2
clock-names:
items:
- const: bus_clk
- const: pll_clk
required:
- vddcore-supply
- vddio-supply
- samsung,phy-type
additionalProperties:
type: object
examples:
- |
#include <dt-bindings/clock/exynos5433.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
dsi@13900000 {
compatible = "samsung,exynos5433-mipi-dsi";
reg = <0x13900000 0xC0>;
interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
phys = <&mipi_phy 1>;
phy-names = "dsim";
clocks = <&cmu_disp CLK_PCLK_DSIM0>,
<&cmu_disp CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8>,
<&cmu_disp CLK_PHYCLK_MIPIDPHY0_RXCLKESC0>,
<&cmu_disp CLK_SCLK_RGB_VCLK_TO_DSIM0>,
<&cmu_disp CLK_SCLK_DSIM0>;
clock-names = "bus_clk",
"phyclk_mipidphy0_bitclkdiv8",
"phyclk_mipidphy0_rxclkesc0",
"sclk_rgb_vclk_to_dsim0",
"sclk_mipi";
power-domains = <&pd_disp>;
vddcore-supply = <&ldo6_reg>;
vddio-supply = <&ldo7_reg>;
samsung,burst-clock-frequency = <512000000>;
samsung,esc-clock-frequency = <16000000>;
samsung,pll-clock-frequency = <24000000>;
pinctrl-names = "default";
pinctrl-0 = <&te_irq>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
dsi_to_mic: endpoint {
remote-endpoint = <&mic_to_dsi>;
};
};
};
};

@@ -26,19 +26,9 @@ properties:
reg:
maxItems: 1
clocks:
items:
- description: Module clock
- description: DSI bus clock for either AHB and APB
- description: Pixel clock for the DPI/RGB input
minItems: 2
clocks: true
clock-names:
items:
- const: ref
- const: pclk
- const: px_clk
minItems: 2
clock-names: true
resets:
maxItems: 1

@@ -23,7 +23,7 @@ properties:
i2c address of the bridge, 0x68 or 0x0f, depending on bootstrap pins
clock-names:
const: "ref"
const: ref
clocks:
maxItems: 1

@@ -26,7 +26,7 @@ description:
properties:
$nodename:
const: "aux-bus"
const: aux-bus
panel:
$ref: panel/panel-common.yaml#

@@ -30,6 +30,15 @@ properties:
$nodename:
pattern: "^dsi(@.*)?$"
clock-master:
type: boolean
description:
Should be enabled if the host is being used in conjunction with
another DSI host to drive the same peripheral. Hardware supporting
such a configuration generally requires the data on both the busses
to be driven by the same clock. Only the DSI host instance
controlling this clock should contain this property.
"#address-cells":
const: 1
@@ -52,15 +61,6 @@ patternProperties:
case the reg property can take multiple entries, one for each virtual
channel that the peripheral responds to.
clock-master:
type: boolean
description:
Should be enabled if the host is being used in conjunction with
another DSI host to drive the same peripheral. Hardware supporting
such a configuration generally requires the data on both the busses
to be driven by the same clock. Only the DSI host instance
controlling this clock should contain this property.
enforce-video-mode:
type: boolean
description:

@@ -50,7 +50,7 @@ Optional properties for dp-controller:
Documentation/devicetree/bindings/display/panel/display-timing.txt
For the below properties, please refer to Analogix DP binding document:
* Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
* Documentation/devicetree/bindings/display/bridge/analogix,dp.yaml
-phys (required)
-phy-names (required)
-hpd-gpios (optional)

@@ -1,90 +0,0 @@
Exynos MIPI DSI Master
Required properties:
- compatible: value should be one of the following
"samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */
"samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
"samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
"samsung,exynos5422-mipi-dsi" /* for Exynos5422/5800 SoCs */
"samsung,exynos5433-mipi-dsi" /* for Exynos5433 SoCs */
- reg: physical base address and length of the registers set for the device
- interrupts: should contain DSI interrupt
- clocks: list of clock specifiers, must contain an entry for each required
entry in clock-names
- clock-names: should include "bus_clk"and "sclk_mipi" entries
the use of "pll_clk" is deprecated
- phys: list of phy specifiers, must contain an entry for each required
entry in phy-names
- phy-names: should include "dsim" entry
- vddcore-supply: MIPI DSIM Core voltage supply (e.g. 1.1V)
- vddio-supply: MIPI DSIM I/O and PLL voltage supply (e.g. 1.8V)
- samsung,pll-clock-frequency: specifies frequency of the oscillator clock
- #address-cells, #size-cells: should be set respectively to <1> and <0>
according to DSI host bindings (see MIPI DSI bindings [1])
- samsung,burst-clock-frequency: specifies DSI frequency in high-speed burst
mode
- samsung,esc-clock-frequency: specifies DSI frequency in escape mode
Optional properties:
- power-domains: a phandle to DSIM power domain node
Child nodes:
Should contain DSI peripheral nodes (see MIPI DSI bindings [1]).
Video interfaces:
Device node can contain following video interface port nodes according to [2]:
0: RGB input,
1: DSI output
[1]: Documentation/devicetree/bindings/display/mipi-dsi-bus.txt
[2]: Documentation/devicetree/bindings/media/video-interfaces.txt
Example:
dsi@11c80000 {
compatible = "samsung,exynos4210-mipi-dsi";
reg = <0x11C80000 0x10000>;
interrupts = <0 79 0>;
clocks = <&clock 286>, <&clock 143>;
clock-names = "bus_clk", "sclk_mipi";
phys = <&mipi_phy 1>;
phy-names = "dsim";
vddcore-supply = <&vusb_reg>;
vddio-supply = <&vmipi_reg>;
power-domains = <&pd_lcd0>;
#address-cells = <1>;
#size-cells = <0>;
samsung,pll-clock-frequency = <24000000>;
panel@1 {
reg = <0>;
...
port {
panel_ep: endpoint {
remote-endpoint = <&dsi_ep>;
};
};
};
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
decon_to_mic: endpoint {
remote-endpoint = <&mic_to_decon>;
};
};
port@1 {
reg = <1>;
dsi_ep: endpoint {
reg = <0>;
samsung,burst-clock-frequency = <500000000>;
samsung,esc-clock-frequency = <20000000>;
remote-endpoint = <&panel_ep>;
};
};
};
};

@@ -21,6 +21,9 @@ properties:
- fsl,imx25-fb
- fsl,imx27-fb
- const: fsl,imx21-fb
- items:
- const: fsl,imx25-lcdc
- const: fsl,imx21-lcdc
clocks:
maxItems: 3
@@ -31,6 +34,9 @@ properties:
- const: ahb
- const: per
port:
$ref: /schemas/graph.yaml#/properties/port
display:
$ref: /schemas/types.yaml#/definitions/phandle
@@ -59,17 +65,55 @@ properties:
description:
LCDC Sharp Configuration Register value.
allOf:
- if:
properties:
compatible:
contains:
enum:
- fsl,imx1-lcdc
- fsl,imx21-lcdc
then:
properties:
display: false
fsl,dmacr: false
fsl,lpccr: false
fsl,lscr1: false
required:
- port
else:
properties:
port: false
required:
- display
required:
- compatible
- clocks
- clock-names
- display
- interrupts
- reg
additionalProperties: false
examples:
- |
lcdc@53fbc000 {
compatible = "fsl,imx25-lcdc", "fsl,imx21-lcdc";
reg = <0x53fbc000 0x4000>;
interrupts = <39>;
clocks = <&clks 103>, <&clks 66>, <&clks 49>;
clock-names = "ipg", "ahb", "per";
port {
parallel_out: endpoint {
remote-endpoint = <&panel_in>;
};
};
};
- |
imxfb: fb@10021000 {
compatible = "fsl,imx21-fb";

@@ -2,8 +2,8 @@
# Copyright 2019 NXP
%YAML 1.2
---
$id: "http://devicetree.org/schemas/display/imx/nxp,imx8mq-dcss.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
$id: http://devicetree.org/schemas/display/imx/nxp,imx8mq-dcss.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: iMX8MQ Display Controller Subsystem (DCSS)

@@ -27,13 +27,10 @@ properties:
- const: mediatek,mt8192-disp-ccorr
- items:
- enum:
- mediatek,mt8186-disp-ccorr
- mediatek,mt8188-disp-ccorr
- mediatek,mt8195-disp-ccorr
- const: mediatek,mt8192-disp-ccorr
- items:
- enum:
- mediatek,mt8186-disp-ccorr
- const: mediatek,mt8192-disp-ccorr
reg:
maxItems: 1

@@ -0,0 +1,182 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/mediatek/mediatek,ethdr.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek Ethdr Device
maintainers:
- Chun-Kuang Hu <chunkuang.hu@kernel.org>
- Philipp Zabel <p.zabel@pengutronix.de>
description:
ETHDR (ET High Dynamic Range) is a MediaTek internal HDR engine and is
designed for HDR video and graphics conversion in the external display path.
It handles multiple HDR input types and performs tone mapping, color
space/color format conversion, then combines the different layers and
outputs the required HDR or SDR signal to the subsequent display path.
This engine is composed of two video frontends, two graphic frontends,
one video backend and a mixer. ETHDR has two DMA function blocks, DS and ADL.
These two function blocks read the pre-programmed registers from DRAM and
set them to HW in the v-blanking period.
properties:
compatible:
const: mediatek,mt8195-disp-ethdr
reg:
maxItems: 7
reg-names:
items:
- const: mixer
- const: vdo_fe0
- const: vdo_fe1
- const: gfx_fe0
- const: gfx_fe1
- const: vdo_be
- const: adl_ds
interrupts:
maxItems: 1
iommus:
minItems: 1
maxItems: 2
clocks:
items:
- description: mixer clock
- description: video frontend 0 clock
- description: video frontend 1 clock
- description: graphic frontend 0 clock
- description: graphic frontend 1 clock
- description: video backend clock
- description: autodownload and menuload clock
- description: video frontend 0 async clock
- description: video frontend 1 async clock
- description: graphic frontend 0 async clock
- description: graphic frontend 1 async clock
- description: video backend async clock
- description: ethdr top clock
clock-names:
items:
- const: mixer
- const: vdo_fe0
- const: vdo_fe1
- const: gfx_fe0
- const: gfx_fe1
- const: vdo_be
- const: adl_ds
- const: vdo_fe0_async
- const: vdo_fe1_async
- const: gfx_fe0_async
- const: gfx_fe1_async
- const: vdo_be_async
- const: ethdr_top
power-domains:
maxItems: 1
resets:
items:
- description: video frontend 0 async reset
- description: video frontend 1 async reset
- description: graphic frontend 0 async reset
- description: graphic frontend 1 async reset
- description: video backend async reset
reset-names:
items:
- const: vdo_fe0_async
- const: vdo_fe1_async
- const: gfx_fe0_async
- const: gfx_fe1_async
- const: vdo_be_async
mediatek,gce-client-reg:
$ref: /schemas/types.yaml#/definitions/phandle-array
minItems: 1
maxItems: 7
description: The register of the display function block to be set by the GCE.
This property takes four arguments: the GCE node, the subsys id, the offset
and the register size. The subsys id is defined in each chip's GCE header,
include/dt-bindings/gce/<chip>-gce.h, and maps to the register of the
display function block.
required:
- compatible
- reg
- clocks
- clock-names
- interrupts
- power-domains
- resets
- mediatek,gce-client-reg
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/mt8195-clk.h>
#include <dt-bindings/gce/mt8195-gce.h>
#include <dt-bindings/memory/mt8195-memory-port.h>
#include <dt-bindings/power/mt8195-power.h>
#include <dt-bindings/reset/mt8195-resets.h>
soc {
#address-cells = <2>;
#size-cells = <2>;
hdr-engine@1c114000 {
compatible = "mediatek,mt8195-disp-ethdr";
reg = <0 0x1c114000 0 0x1000>,
<0 0x1c115000 0 0x1000>,
<0 0x1c117000 0 0x1000>,
<0 0x1c119000 0 0x1000>,
<0 0x1c11a000 0 0x1000>,
<0 0x1c11b000 0 0x1000>,
<0 0x1c11c000 0 0x1000>;
reg-names = "mixer", "vdo_fe0", "vdo_fe1", "gfx_fe0", "gfx_fe1",
"vdo_be", "adl_ds";
mediatek,gce-client-reg = <&gce0 SUBSYS_1c11XXXX 0x4000 0x1000>,
<&gce0 SUBSYS_1c11XXXX 0x5000 0x1000>,
<&gce0 SUBSYS_1c11XXXX 0x7000 0x1000>,
<&gce0 SUBSYS_1c11XXXX 0x9000 0x1000>,
<&gce0 SUBSYS_1c11XXXX 0xa000 0x1000>,
<&gce0 SUBSYS_1c11XXXX 0xb000 0x1000>,
<&gce0 SUBSYS_1c11XXXX 0xc000 0x1000>;
clocks = <&vdosys1 CLK_VDO1_DISP_MIXER>,
<&vdosys1 CLK_VDO1_HDR_VDO_FE0>,
<&vdosys1 CLK_VDO1_HDR_VDO_FE1>,
<&vdosys1 CLK_VDO1_HDR_GFX_FE0>,
<&vdosys1 CLK_VDO1_HDR_GFX_FE1>,
<&vdosys1 CLK_VDO1_HDR_VDO_BE>,
<&vdosys1 CLK_VDO1_26M_SLOW>,
<&vdosys1 CLK_VDO1_HDR_VDO_FE0_DL_ASYNC>,
<&vdosys1 CLK_VDO1_HDR_VDO_FE1_DL_ASYNC>,
<&vdosys1 CLK_VDO1_HDR_GFX_FE0_DL_ASYNC>,
<&vdosys1 CLK_VDO1_HDR_GFX_FE1_DL_ASYNC>,
<&vdosys1 CLK_VDO1_HDR_VDO_BE_DL_ASYNC>,
<&topckgen CLK_TOP_ETHDR>;
clock-names = "mixer", "vdo_fe0", "vdo_fe1", "gfx_fe0", "gfx_fe1",
"vdo_be", "adl_ds", "vdo_fe0_async", "vdo_fe1_async",
"gfx_fe0_async", "gfx_fe1_async","vdo_be_async",
"ethdr_top";
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
iommus = <&iommu_vpp M4U_PORT_L3_HDR_DS>,
<&iommu_vpp M4U_PORT_L3_HDR_ADL>;
interrupts = <GIC_SPI 517 IRQ_TYPE_LEVEL_HIGH 0>; /* disp mixer */
resets = <&vdosys1 MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE0_DL_ASYNC>,
<&vdosys1 MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE1_DL_ASYNC>,
<&vdosys1 MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE0_DL_ASYNC>,
<&vdosys1 MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE1_DL_ASYNC>,
<&vdosys1 MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_BE_DL_ASYNC>;
reset-names = "vdo_fe0_async", "vdo_fe1_async", "gfx_fe0_async",
"gfx_fe1_async", "vdo_be_async";
};
};
...


@ -50,7 +50,7 @@ properties:
- const: hdmi
mediatek,syscon-hdmi:
$ref: '/schemas/types.yaml#/definitions/phandle-array'
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
- items:
- description: phandle to system configuration registers


@ -15,16 +15,21 @@ description: |
properties:
compatible:
enum:
- qcom,sc7180-dp
- qcom,sc7280-dp
- qcom,sc7280-edp
- qcom,sc8180x-dp
- qcom,sc8180x-edp
- qcom,sc8280xp-dp
- qcom,sc8280xp-edp
- qcom,sdm845-dp
- qcom,sm8350-dp
oneOf:
- enum:
- qcom,sc7180-dp
- qcom,sc7280-dp
- qcom,sc7280-edp
- qcom,sc8180x-dp
- qcom,sc8180x-edp
- qcom,sc8280xp-dp
- qcom,sc8280xp-edp
- qcom,sdm845-dp
- qcom,sm8350-dp
- items:
- enum:
- qcom,sm8450-dp
- const: qcom,sm8350-dp
reg:
minItems: 4


@ -25,16 +25,16 @@ properties:
- qcom,sc7280-dsi-ctrl
- qcom,sdm660-dsi-ctrl
- qcom,sdm845-dsi-ctrl
- qcom,sm6115-dsi-ctrl
- qcom,sm8150-dsi-ctrl
- qcom,sm8250-dsi-ctrl
- qcom,sm8350-dsi-ctrl
- qcom,sm8450-dsi-ctrl
- qcom,sm8550-dsi-ctrl
- const: qcom,mdss-dsi-ctrl
- items:
- enum:
- dsi-ctrl-6g-qcm2290
- const: qcom,mdss-dsi-ctrl
- enum:
- qcom,dsi-ctrl-6g-qcm2290
- qcom,mdss-dsi-ctrl # This should always come with an SoC-specific compatible
deprecated: true
reg:
@ -74,7 +74,7 @@ properties:
syscon-sfpb:
description: A phandle to mmss_sfpb syscon node (only for DSIv2).
$ref: "/schemas/types.yaml#/definitions/phandle"
$ref: /schemas/types.yaml#/definitions/phandle
qcom,dual-dsi-mode:
type: boolean
@ -105,14 +105,14 @@ properties:
type: object
ports:
$ref: "/schemas/graph.yaml#/properties/ports"
$ref: /schemas/graph.yaml#/properties/ports
description: |
Contains DSI controller input and output ports as children, each
containing one endpoint subnode.
properties:
port@0:
$ref: "/schemas/graph.yaml#/$defs/port-base"
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
description: |
Input endpoints of the controller.
@ -128,7 +128,7 @@ properties:
enum: [ 0, 1, 2, 3 ]
port@1:
$ref: "/schemas/graph.yaml#/$defs/port-base"
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
description: |
Output endpoints of the controller.
@ -351,6 +351,7 @@ allOf:
contains:
enum:
- qcom,sdm845-dsi-ctrl
- qcom,sm6115-dsi-ctrl
then:
properties:
clocks:


@ -58,7 +58,7 @@ properties:
maximum: 31
qcom,phy-drive-ldo-level:
$ref: "/schemas/types.yaml#/definitions/uint32"
$ref: /schemas/types.yaml#/definitions/uint32
description:
The PHY LDO has an amplitude tuning feature to adjust the LDO output
for the HSTX drive. Use supported levels (mV) to offset the drive level


@ -3,8 +3,8 @@
%YAML 1.2
---
$id: "http://devicetree.org/schemas/display/msm/gmu.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
$id: http://devicetree.org/schemas/display/msm/gmu.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: GMU attached to certain Adreno GPUs


@ -2,8 +2,8 @@
%YAML 1.2
---
$id: "http://devicetree.org/schemas/display/msm/gpu.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
$id: http://devicetree.org/schemas/display/msm/gpu.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Adreno or Snapdragon GPUs


@ -1,8 +1,8 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: "http://devicetree.org/schemas/display/msm/mdp4.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
$id: http://devicetree.org/schemas/display/msm/mdp4.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Adreno/Snapdragon MDP4 display controller


@ -40,7 +40,13 @@ patternProperties:
type: object
properties:
compatible:
const: qcom,dsi-ctrl-6g-qcm2290
oneOf:
- items:
- const: qcom,sm6115-dsi-ctrl
- const: qcom,mdss-dsi-ctrl
- description: Old binding, please don't use
deprecated: true
const: qcom,dsi-ctrl-6g-qcm2290
"^phy@[0-9a-f]+$":
type: object
@ -114,7 +120,7 @@ examples:
};
dsi@5e94000 {
compatible = "qcom,dsi-ctrl-6g-qcm2290";
compatible = "qcom,sm6115-dsi-ctrl", "qcom,mdss-dsi-ctrl";
reg = <0x05e94000 0x400>;
reg-names = "dsi_ctrl";


@ -54,7 +54,7 @@ patternProperties:
type: object
properties:
compatible:
const: qcom,dsi-phy-5nm-8450
const: qcom,sm8450-dsi-phy-5nm
required:
- compatible
@ -254,7 +254,7 @@ examples:
};
dsi0_phy: phy@ae94400 {
compatible = "qcom,dsi-phy-5nm-8450";
compatible = "qcom,sm8450-dsi-phy-5nm";
reg = <0x0ae94400 0x200>,
<0x0ae94600 0x280>,
<0x0ae94900 0x260>;
@ -325,7 +325,7 @@ examples:
};
dsi1_phy: phy@ae96400 {
compatible = "qcom,dsi-phy-5nm-8450";
compatible = "qcom,sm8450-dsi-phy-5nm";
reg = <0x0ae96400 0x200>,
<0x0ae96600 0x280>,
<0x0ae96900 0x260>;


@ -0,0 +1,133 @@
# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/msm/qcom,sm8550-dpu.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm SM8550 Display DPU
maintainers:
- Neil Armstrong <neil.armstrong@linaro.org>
$ref: /schemas/display/msm/dpu-common.yaml#
properties:
compatible:
const: qcom,sm8550-dpu
reg:
items:
- description: Address offset and size for mdp register set
- description: Address offset and size for vbif register set
reg-names:
items:
- const: mdp
- const: vbif
clocks:
items:
- description: Display AHB
- description: Display hf axi
- description: Display MDSS ahb
- description: Display lut
- description: Display core
- description: Display vsync
clock-names:
items:
- const: bus
- const: nrt_bus
- const: iface
- const: lut
- const: core
- const: vsync
required:
- compatible
- reg
- reg-names
- clocks
- clock-names
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,sm8550-dispcc.h>
#include <dt-bindings/clock/qcom,sm8550-gcc.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/qcom-rpmpd.h>
display-controller@ae01000 {
compatible = "qcom,sm8550-dpu";
reg = <0x0ae01000 0x8f000>,
<0x0aeb0000 0x2008>;
reg-names = "mdp", "vbif";
clocks = <&gcc GCC_DISP_AHB_CLK>,
<&gcc GCC_DISP_HF_AXI_CLK>,
<&dispcc DISP_CC_MDSS_AHB_CLK>,
<&dispcc DISP_CC_MDSS_MDP_LUT_CLK>,
<&dispcc DISP_CC_MDSS_MDP_CLK>,
<&dispcc DISP_CC_MDSS_VSYNC_CLK>;
clock-names = "bus",
"nrt_bus",
"iface",
"lut",
"core",
"vsync";
assigned-clocks = <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
assigned-clock-rates = <19200000>;
operating-points-v2 = <&mdp_opp_table>;
power-domains = <&rpmhpd SM8550_MMCX>;
interrupt-parent = <&mdss>;
interrupts = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
dpu_intf1_out: endpoint {
remote-endpoint = <&dsi0_in>;
};
};
port@1 {
reg = <1>;
dpu_intf2_out: endpoint {
remote-endpoint = <&dsi1_in>;
};
};
};
mdp_opp_table: opp-table {
compatible = "operating-points-v2";
opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
required-opps = <&rpmhpd_opp_low_svs>;
};
opp-325000000 {
opp-hz = /bits/ 64 <325000000>;
required-opps = <&rpmhpd_opp_svs>;
};
opp-375000000 {
opp-hz = /bits/ 64 <375000000>;
required-opps = <&rpmhpd_opp_svs_l1>;
};
opp-514000000 {
opp-hz = /bits/ 64 <514000000>;
required-opps = <&rpmhpd_opp_nom>;
};
};
};
...


@ -0,0 +1,333 @@
# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/msm/qcom,sm8550-mdss.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm SM8550 Display MDSS
maintainers:
- Neil Armstrong <neil.armstrong@linaro.org>
description:
SM8550 MSM Mobile Display Subsystem (MDSS), which encapsulates sub-blocks
such as the DPU display controller and the DSI and DP interfaces.
$ref: /schemas/display/msm/mdss-common.yaml#
properties:
compatible:
const: qcom,sm8550-mdss
clocks:
items:
- description: Display MDSS AHB
- description: Display AHB
- description: Display hf AXI
- description: Display core
iommus:
maxItems: 1
interconnects:
maxItems: 2
interconnect-names:
maxItems: 2
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
properties:
compatible:
const: qcom,sm8550-dpu
"^dsi@[0-9a-f]+$":
type: object
properties:
compatible:
items:
- const: qcom,sm8550-dsi-ctrl
- const: qcom,mdss-dsi-ctrl
"^phy@[0-9a-f]+$":
type: object
properties:
compatible:
const: qcom,sm8550-dsi-phy-4nm
required:
- compatible
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,sm8550-dispcc.h>
#include <dt-bindings/clock/qcom,sm8550-gcc.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interconnect/qcom,sm8550-rpmh.h>
#include <dt-bindings/power/qcom-rpmpd.h>
display-subsystem@ae00000 {
compatible = "qcom,sm8550-mdss";
reg = <0x0ae00000 0x1000>;
reg-names = "mdss";
interconnects = <&mmss_noc MASTER_MDP 0 &gem_noc SLAVE_LLCC 0>,
<&mc_virt MASTER_LLCC 0 &mc_virt SLAVE_EBI1 0>;
interconnect-names = "mdp0-mem", "mdp1-mem";
resets = <&dispcc DISP_CC_MDSS_CORE_BCR>;
power-domains = <&dispcc MDSS_GDSC>;
clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
<&gcc GCC_DISP_AHB_CLK>,
<&gcc GCC_DISP_HF_AXI_CLK>,
<&dispcc DISP_CC_MDSS_MDP_CLK>;
clock-names = "iface", "bus", "nrt_bus", "core";
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <1>;
iommus = <&apps_smmu 0x1c00 0x2>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
display-controller@ae01000 {
compatible = "qcom,sm8550-dpu";
reg = <0x0ae01000 0x8f000>,
<0x0aeb0000 0x2008>;
reg-names = "mdp", "vbif";
clocks = <&gcc GCC_DISP_AHB_CLK>,
<&gcc GCC_DISP_HF_AXI_CLK>,
<&dispcc DISP_CC_MDSS_AHB_CLK>,
<&dispcc DISP_CC_MDSS_MDP_LUT_CLK>,
<&dispcc DISP_CC_MDSS_MDP_CLK>,
<&dispcc DISP_CC_MDSS_VSYNC_CLK>;
clock-names = "bus",
"nrt_bus",
"iface",
"lut",
"core",
"vsync";
assigned-clocks = <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
assigned-clock-rates = <19200000>;
operating-points-v2 = <&mdp_opp_table>;
power-domains = <&rpmhpd SM8550_MMCX>;
interrupt-parent = <&mdss>;
interrupts = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
dpu_intf1_out: endpoint {
remote-endpoint = <&dsi0_in>;
};
};
port@1 {
reg = <1>;
dpu_intf2_out: endpoint {
remote-endpoint = <&dsi1_in>;
};
};
};
mdp_opp_table: opp-table {
compatible = "operating-points-v2";
opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
required-opps = <&rpmhpd_opp_low_svs>;
};
opp-325000000 {
opp-hz = /bits/ 64 <325000000>;
required-opps = <&rpmhpd_opp_svs>;
};
opp-375000000 {
opp-hz = /bits/ 64 <375000000>;
required-opps = <&rpmhpd_opp_svs_l1>;
};
opp-514000000 {
opp-hz = /bits/ 64 <514000000>;
required-opps = <&rpmhpd_opp_nom>;
};
};
};
dsi@ae94000 {
compatible = "qcom,sm8550-dsi-ctrl", "qcom,mdss-dsi-ctrl";
reg = <0x0ae94000 0x400>;
reg-names = "dsi_ctrl";
interrupt-parent = <&mdss>;
interrupts = <4>;
clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>,
<&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
<&dispcc DISP_CC_MDSS_PCLK0_CLK>,
<&dispcc DISP_CC_MDSS_ESC0_CLK>,
<&dispcc DISP_CC_MDSS_AHB_CLK>,
<&gcc GCC_DISP_HF_AXI_CLK>;
clock-names = "byte",
"byte_intf",
"pixel",
"core",
"iface",
"bus";
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
<&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
assigned-clock-parents = <&dsi0_phy 0>, <&dsi0_phy 1>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmhpd SM8550_MMCX>;
phys = <&dsi0_phy>;
phy-names = "dsi";
#address-cells = <1>;
#size-cells = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
dsi0_in: endpoint {
remote-endpoint = <&dpu_intf1_out>;
};
};
port@1 {
reg = <1>;
dsi0_out: endpoint {
};
};
};
dsi_opp_table: opp-table {
compatible = "operating-points-v2";
opp-187500000 {
opp-hz = /bits/ 64 <187500000>;
required-opps = <&rpmhpd_opp_low_svs>;
};
opp-300000000 {
opp-hz = /bits/ 64 <300000000>;
required-opps = <&rpmhpd_opp_svs>;
};
opp-358000000 {
opp-hz = /bits/ 64 <358000000>;
required-opps = <&rpmhpd_opp_svs_l1>;
};
};
};
dsi0_phy: phy@ae94400 {
compatible = "qcom,sm8550-dsi-phy-4nm";
reg = <0x0ae95000 0x200>,
<0x0ae95200 0x280>,
<0x0ae95500 0x400>;
reg-names = "dsi_phy",
"dsi_phy_lane",
"dsi_pll";
#clock-cells = <1>;
#phy-cells = <0>;
clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
<&rpmhcc RPMH_CXO_CLK>;
clock-names = "iface", "ref";
};
dsi@ae96000 {
compatible = "qcom,sm8550-dsi-ctrl", "qcom,mdss-dsi-ctrl";
reg = <0x0ae96000 0x400>;
reg-names = "dsi_ctrl";
interrupt-parent = <&mdss>;
interrupts = <5>;
clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK>,
<&dispcc DISP_CC_MDSS_BYTE1_INTF_CLK>,
<&dispcc DISP_CC_MDSS_PCLK1_CLK>,
<&dispcc DISP_CC_MDSS_ESC1_CLK>,
<&dispcc DISP_CC_MDSS_AHB_CLK>,
<&gcc GCC_DISP_HF_AXI_CLK>;
clock-names = "byte",
"byte_intf",
"pixel",
"core",
"iface",
"bus";
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>,
<&dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>;
assigned-clock-parents = <&dsi1_phy 0>, <&dsi1_phy 1>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmhpd SM8550_MMCX>;
phys = <&dsi1_phy>;
phy-names = "dsi";
#address-cells = <1>;
#size-cells = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
dsi1_in: endpoint {
remote-endpoint = <&dpu_intf2_out>;
};
};
port@1 {
reg = <1>;
dsi1_out: endpoint {
};
};
};
};
dsi1_phy: phy@ae96400 {
compatible = "qcom,sm8550-dsi-phy-4nm";
reg = <0x0ae97000 0x200>,
<0x0ae97200 0x280>,
<0x0ae97500 0x400>;
reg-names = "dsi_phy",
"dsi_phy_lane",
"dsi_pll";
#clock-cells = <1>;
#phy-cells = <0>;
clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
<&rpmhcc RPMH_CXO_CLK>;
clock-names = "iface", "ref";
};
};
...


@ -30,6 +30,8 @@ properties:
- boe,tv110c9m-ll3
# INX HJ110IZ-01A 10.95" WUXGA TFT LCD panel
- innolux,hj110iz-01a
# STARRY 2081101QFH032011-53G 10.1" WUXGA TFT LCD panel
- starry,2081101qfh032011-53g
reg:
description: the virtual channel number of a DSI peripheral
@ -53,6 +55,7 @@ properties:
description: phandle of the backlight device attached to the panel
port: true
rotation: true
required:
- compatible


@ -17,7 +17,9 @@ properties:
const: elida,kd35t133
reg: true
backlight: true
port: true
reset-gpios: true
rotation: true
iovcc-supply:
description: regulator that supplies the iovcc voltage
vdd-supply:
@ -27,6 +29,7 @@ required:
- compatible
- reg
- backlight
- port
- iovcc-supply
- vdd-supply
@ -43,6 +46,12 @@ examples:
backlight = <&backlight>;
iovcc-supply = <&vcc_1v8>;
vdd-supply = <&vcc3v3_lcd>;
port {
mipi_in_panel: endpoint {
remote-endpoint = <&mipi_out_panel>;
};
};
};
};


@ -26,6 +26,7 @@ properties:
dvdd-supply:
description: 3v3 digital regulator
port: true
reset-gpios: true
backlight: true
@ -35,6 +36,7 @@ required:
- reg
- avdd-supply
- dvdd-supply
- port
additionalProperties: false
@ -53,5 +55,11 @@ examples:
dvdd-supply = <&reg_dldo2>;
reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* LCD-RST: PD24 */
backlight = <&backlight>;
port {
mipi_in_panel: endpoint {
remote-endpoint = <&mipi_out_panel>;
};
};
};
};


@ -1,43 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/innolux,p120zdg-bf1.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Innolux P120ZDG-BF1 12.02 inch eDP 2K display panel
maintainers:
- Sandeep Panda <spanda@codeaurora.org>
- Douglas Anderson <dianders@chromium.org>
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
const: innolux,p120zdg-bf1
enable-gpios: true
power-supply: true
backlight: true
no-hpd: true
required:
- compatible
- power-supply
additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
panel_edp: panel-edp {
compatible = "innolux,p120zdg-bf1";
enable-gpios = <&msmgpio 31 GPIO_ACTIVE_LOW>;
power-supply = <&pm8916_l2>;
backlight = <&backlight>;
no-hpd;
};
...


@ -17,6 +17,8 @@ properties:
items:
- enum:
- chongzhou,cz101b4001
- radxa,display-10hd-ad001
- radxa,display-8hd-ad002
- const: jadard,jd9365da-h3
reg: true


@ -0,0 +1,85 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/novatek,nt36523.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Novatek NT36523 based DSI display Panels
maintainers:
- Jianhua Lu <lujianhua000@gmail.com>
description: |
The Novatek NT36523 is a generic DSI panel IC used to drive DSI
panels. It supports video mode panels from China Star Optoelectronics
Technology (CSOT) and BOE Technology.
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
items:
- enum:
- xiaomi,elish-boe-nt36523
- xiaomi,elish-csot-nt36523
- const: novatek,nt36523
reset-gpios:
maxItems: 1
description: phandle of gpio for reset line - This should be 8mA
vddio-supply:
description: regulator that supplies the I/O voltage
reg: true
ports: true
backlight: true
required:
- compatible
- reg
- vddio-supply
- reset-gpios
- ports
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
dsi {
#address-cells = <1>;
#size-cells = <0>;
panel@0 {
compatible = "xiaomi,elish-csot-nt36523", "novatek,nt36523";
reg = <0>;
vddio-supply = <&vreg_l14a_1p88>;
reset-gpios = <&tlmm 75 GPIO_ACTIVE_LOW>;
backlight = <&backlight>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
panel_in_0: endpoint {
remote-endpoint = <&dsi0_out>;
};
};
port@1{
reg = <1>;
panel_in_1: endpoint {
remote-endpoint = <&dsi1_out>;
};
};
};
};
};
...


@ -34,7 +34,7 @@ properties:
description: phandle of gpio for reset line - This should be 8mA, gpio
can be configured using mux, pinctrl, pinctrl-names (active high)
vddi0-supply:
vddio-supply:
description: phandle of the regulator that provides the supply voltage
Power IC supply
@ -51,7 +51,7 @@ properties:
required:
- compatible
- reg
- vddi0-supply
- vddio-supply
- vddpos-supply
- vddneg-supply
- reset-gpios
@ -70,7 +70,7 @@ examples:
panel@0 {
compatible = "tianma,fhd-video", "novatek,nt36672a";
reg = <0>;
vddi0-supply = <&vreg_l14a_1p88>;
vddio-supply = <&vreg_l14a_1p88>;
vddpos-supply = <&lab>;
vddneg-supply = <&ibb>;


@ -19,9 +19,6 @@ description: |
If the panel is more advanced a dedicated binding file is required.
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
@ -67,12 +64,31 @@ properties:
reset-gpios: true
port: true
power-supply: true
vddio-supply: true
allOf:
- $ref: panel-common.yaml#
- if:
properties:
compatible:
enum:
- samsung,s6e3fc2x01
- samsung,sofef00
then:
properties:
power-supply: false
required:
- vddio-supply
else:
properties:
vddio-supply: false
required:
- power-supply
additionalProperties: false
required:
- compatible
- power-supply
- reg
examples:


@ -192,6 +192,8 @@ properties:
- innolux,n125hce-gn1
# InnoLux 15.6" WXGA TFT LCD panel
- innolux,n156bge-l21
# Innolux P120ZDG-BF1 12.02 inch eDP 2K display panel
- innolux,p120zdg-bf1
# Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel
- innolux,zj070na-01p
# King & Display KD116N21-30NV-A010 eDP TFT LCD panel


@ -17,29 +17,29 @@ description: |
The parameters are defined as seen in the following illustration.
+----------+-------------------------------------+----------+-------+
| | ^ | | |
| | |vback_porch | | |
| | v | | |
+----------#######################################----------+-------+
| # ^ # | |
| # | # | |
| hback # | # hfront | hsync |
| porch # | hactive # porch | len |
|<-------->#<-------+--------------------------->#<-------->|<----->|
| # | # | |
| # |vactive # | |
| # | # | |
| # v # | |
+----------#######################################----------+-------+
| | ^ | | |
| | |vfront_porch | | |
| | v | | |
+----------+-------------------------------------+----------+-------+
| | ^ | | |
| | |vsync_len | | |
| | v | | |
+----------+-------------------------------------+----------+-------+
+-------+----------+-------------------------------------+----------+
| | | ^ | |
| | | |vsync_len | |
| | | v | |
+-------+----------+-------------------------------------+----------+
| | | ^ | |
| | | |vback_porch | |
| | | v | |
+-------+----------#######################################----------+
| | # ^ # |
| | # | # |
| hsync | hback # | # hfront |
| len | porch # | hactive # porch |
|<----->|<-------->#<-------+--------------------------->#<-------->|
| | # | # |
| | # |vactive # |
| | # | # |
| | # v # |
+-------+----------#######################################----------+
| | | ^ | |
| | | |vfront_porch | |
| | | v | |
+-------+----------+-------------------------------------+----------+
The following shows the panel timings with time on the x-axis.


@ -37,7 +37,7 @@ properties:
backlight:
description: Backlight used by the panel
$ref: "/schemas/types.yaml#/definitions/phandle"
$ref: /schemas/types.yaml#/definitions/phandle
required:
- compatible


@ -0,0 +1,57 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/samsung,ams495qa01.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung AMS495QA01 panel with Magnachip D53E6EA8966 controller
maintainers:
- Chris Morgan <macromorgan@hotmail.com>
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
const: samsung,ams495qa01
reg: true
reset-gpios:
description: reset gpio, must be GPIO_ACTIVE_LOW
elvdd-supply:
description: regulator that supplies voltage to the panel display
enable-gpios: true
port: true
vdd-supply:
description: regulator that supplies voltage to panel logic
required:
- compatible
- reg
- reset-gpios
- vdd-supply
additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
spi {
#address-cells = <1>;
#size-cells = <0>;
panel@0 {
compatible = "samsung,ams495qa01";
reg = <0>;
reset-gpios = <&gpio4 0 GPIO_ACTIVE_LOW>;
vdd-supply = <&vcc_3v3>;
port {
mipi_in_panel: endpoint {
remote-endpoint = <&mipi_out_panel>;
};
};
};
};
...


@ -16,6 +16,7 @@ properties:
compatible:
const: samsung,s6e88a0-ams452ef01
reg: true
port: true
reset-gpios: true
vdd3-supply:
description: core voltage supply
@ -25,6 +26,7 @@ properties:
required:
- compatible
- reg
- port
- vdd3-supply
- vci-supply
- reset-gpios
@ -46,5 +48,11 @@ examples:
vdd3-supply = <&pm8916_l17>;
vci-supply = <&reg_vlcd_vci>;
reset-gpios = <&msmgpio 25 GPIO_ACTIVE_HIGH>;
port {
panel_in: endpoint {
remote-endpoint = <&dsi0_out>;
};
};
};
};


@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Seiko Instruments Inc. 4.3" WVGA (800 x RGB x 480) TFT with Touch-Panel
maintainers:
- Marco Franchi <marco.franchi@nxp.com>
- Fabio Estevam <festevam@gmail.com>
allOf:
- $ref: panel-common.yaml#
@ -25,6 +25,8 @@ properties:
avdd-supply:
description: 5v analog regulator
enable-gpios: true
required:
- compatible
- dvdd-supply


@ -28,6 +28,7 @@ properties:
items:
- enum:
- densitron,dmt028vghmcmi-1a
- elida,kd50t048a
- techstar,ts8550b
- const: sitronix,st7701
@ -41,7 +42,9 @@ properties:
IOVCC-supply:
description: I/O system regulator
port: true
reset-gpios: true
rotation: true
backlight: true
@ -50,6 +53,7 @@ required:
- reg
- VCC-supply
- IOVCC-supply
- port
- reset-gpios
additionalProperties: false
@ -69,5 +73,11 @@ examples:
IOVCC-supply = <&reg_dldo2>;
reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* LCD-RST: PD24 */
backlight = <&backlight>;
port {
mipi_in_panel: endpoint {
remote-endpoint = <&mipi_out_panel>;
};
};
};
};


@ -26,6 +26,10 @@ properties:
spi-cpha: true
spi-cpol: true
dc-gpios:
maxItems: 1
description: DCX pin, Display data/command selection pin in parallel interface
required:
- compatible
- reg


@ -0,0 +1,82 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/sony,td4353-jdi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Sony TD4353 JDI 5 / 5.7" 2160x1080 MIPI-DSI Panel
maintainers:
- Konrad Dybcio <konrad.dybcio@somainline.org>
description: |
The Sony TD4353 JDI is a 5 (XZ2c) / 5.7 (XZ2) inch 2160x1080
MIPI-DSI panel, used in Xperia XZ2 and XZ2 Compact smartphones.
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
const: sony,td4353-jdi-tama
reg: true
backlight: true
vddio-supply:
description: VDDIO 1.8V supply
vsp-supply:
description: Positive 5.5V supply
vsn-supply:
description: Negative 5.5V supply
panel-reset-gpios:
description: Display panel reset pin
touch-reset-gpios:
description: Touch panel reset pin
port: true
required:
- compatible
- reg
- vddio-supply
- vsp-supply
- vsn-supply
- panel-reset-gpios
- touch-reset-gpios
- port
additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
dsi {
#address-cells = <1>;
#size-cells = <0>;
panel: panel@0 {
compatible = "sony,td4353-jdi-tama";
reg = <0>;
backlight = <&pmi8998_wled>;
vddio-supply = <&vreg_l14a_1p8>;
vsp-supply = <&lab>;
vsn-supply = <&ibb>;
panel-reset-gpios = <&tlmm 6 GPIO_ACTIVE_HIGH>;
touch-reset-gpios = <&tlmm 99 GPIO_ACTIVE_HIGH>;
port {
panel_in: endpoint {
remote-endpoint = <&dsi0_out>;
};
};
};
};
...


@ -19,6 +19,8 @@ properties:
compatible:
const: visionox,rm69299-1080p-display
reg: true
vdda-supply:
description: |
Phandle of the regulator that provides the vdda supply voltage.
@ -34,6 +36,7 @@ additionalProperties: false
required:
- compatible
- reg
- vdda-supply
- vdd3p3-supply
- reset-gpios
@ -41,16 +44,22 @@ required:
examples:
- |
panel {
compatible = "visionox,rm69299-1080p-display";
dsi {
#address-cells = <1>;
#size-cells = <0>;
vdda-supply = <&src_pp1800_l8c>;
vdd3p3-supply = <&src_pp2800_l18a>;
panel@0 {
compatible = "visionox,rm69299-1080p-display";
reg = <0>;
reset-gpios = <&pm6150l_gpio 3 0>;
port {
panel0_in: endpoint {
remote-endpoint = <&dsi0_out>;
vdda-supply = <&src_pp1800_l8c>;
vdd3p3-supply = <&src_pp2800_l18a>;
reset-gpios = <&pm6150l_gpio 3 0>;
port {
panel0_in: endpoint {
remote-endpoint = <&dsi0_out>;
};
};
};
};


@ -17,6 +17,7 @@ properties:
const: xinpeng,xpp055c272
reg: true
backlight: true
port: true
reset-gpios: true
iovcc-supply:
description: regulator that supplies the iovcc voltage
@ -27,6 +28,7 @@ required:
- compatible
- reg
- backlight
- port
- iovcc-supply
- vci-supply
@ -44,6 +46,12 @@ examples:
backlight = <&backlight>;
iovcc-supply = <&vcc_1v8>;
vci-supply = <&vcc3v3_lcd>;
port {
mipi_in_panel: endpoint {
remote-endpoint = <&mipi_out_panel>;
};
};
};
};


@ -76,7 +76,7 @@ properties:
unevaluatedProperties: false
renesas,cmms:
$ref: "/schemas/types.yaml#/definitions/phandle-array"
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
maxItems: 1
description:
@ -84,7 +84,7 @@ properties:
available DU channel.
renesas,vsps:
$ref: "/schemas/types.yaml#/definitions/phandle-array"
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
items:
- description: phandle to VSP instance that serves the DU channel


@ -1,98 +0,0 @@
Rockchip RK3288 specific extensions to the Analogix Display Port
================================
Required properties:
- compatible: "rockchip,rk3288-dp",
"rockchip,rk3399-edp";
- reg: physical base address of the controller and length
of memory mapped region.
- clocks: from common clock binding: handle to dp clock.
- clock-names: from common clock binding:
Required elements: "dp" "pclk"
- resets: Must contain an entry for each entry in reset-names.
See ../reset/reset.txt for details.
- pinctrl-names: Names corresponding to the chip hotplug pinctrl states.
- pinctrl-0: pin-control mode. should be <&edp_hpd>
- reset-names: Must include the name "dp"
- rockchip,grf: this soc should set GRF regs, so need get grf here.
- ports: there are 2 port nodes with endpoint definitions as defined in
Documentation/devicetree/bindings/media/video-interfaces.txt.
Port 0: contained 2 endpoints, connecting to the output of vop.
Port 1: contained 1 endpoint, connecting to the input of panel.
Optional property for different chips:
- clocks: from common clock binding: handle to grf_vio clock.
- clock-names: from common clock binding:
Required elements: "grf"
For the below properties, please refer to Analogix DP binding document:
* Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
- phys (required)
- phy-names (required)
- hpd-gpios (optional)
- force-hpd (optional)
-------------------------------------------------------------------------------
Example:
dp-controller: dp@ff970000 {
compatible = "rockchip,rk3288-dp";
reg = <0xff970000 0x4000>;
interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_EDP>, <&cru PCLK_EDP_CTRL>;
clock-names = "dp", "pclk";
phys = <&dp_phy>;
phy-names = "dp";
rockchip,grf = <&grf>;
resets = <&cru 111>;
reset-names = "dp";
pinctrl-names = "default";
pinctrl-0 = <&edp_hpd>;
ports {
#address-cells = <1>;
#size-cells = <0>;
edp_in: port@0 {
reg = <0>;
#address-cells = <1>;
#size-cells = <0>;
edp_in_vopb: endpoint@0 {
reg = <0>;
remote-endpoint = <&vopb_out_edp>;
};
edp_in_vopl: endpoint@1 {
reg = <1>;
remote-endpoint = <&vopl_out_edp>;
};
};
edp_out: port@1 {
reg = <1>;
#address-cells = <1>;
#size-cells = <0>;
edp_out_panel: endpoint {
reg = <0>;
remote-endpoint = <&panel_in_edp>
};
};
};
};
pinctrl {
edp {
edp_hpd: edp-hpd {
rockchip,pins = <7 11 RK_FUNC_2 &pcfg_pull_none>;
};
};
};


@ -1,94 +0,0 @@
Rockchip specific extensions to the Synopsys Designware MIPI DSI
================================
Required properties:
- #address-cells: Should be <1>.
- #size-cells: Should be <0>.
- compatible: one of
"rockchip,px30-mipi-dsi", "snps,dw-mipi-dsi"
"rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi"
"rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi"
"rockchip,rk3568-mipi-dsi", "snps,dw-mipi-dsi"
- reg: Represent the physical address range of the controller.
- interrupts: Represent the controller's interrupt to the CPU(s).
- clocks, clock-names: Phandles to the controller's pll reference
clock(ref) when using an internal dphy and APB clock(pclk).
For RK3399, a phy config clock (phy_cfg) and a grf clock(grf)
are required. As described in [1].
- rockchip,grf: this soc should set GRF regs to mux vopl/vopb.
- ports: contain a port node with endpoint definitions as defined in [2].
For vopb,set the reg = <0> and set the reg = <1> for vopl.
- video port 0 for the VOP input, the remote endpoint maybe vopb or vopl
- video port 1 for either a panel or subsequent encoder
Optional properties:
- phys: from general PHY binding: the phandle for the PHY device.
- phy-names: Should be "dphy" if phys references an external phy.
- #phy-cells: Defined when used as ISP phy, should be 0.
- power-domains: a phandle to mipi dsi power domain node.
- resets: list of phandle + reset specifier pairs, as described in [3].
- reset-names: string reset name, must be "apb".
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
[2] Documentation/devicetree/bindings/media/video-interfaces.txt
[3] Documentation/devicetree/bindings/reset/reset.txt
Example:
mipi_dsi: mipi@ff960000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi";
reg = <0xff960000 0x4000>;
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_MIPI_24M>, <&cru PCLK_MIPI_DSI0>;
clock-names = "ref", "pclk";
resets = <&cru SRST_MIPIDSI0>;
reset-names = "apb";
rockchip,grf = <&grf>;
ports {
#address-cells = <1>;
#size-cells = <0>;
mipi_in: port@0 {
reg = <0>;
#address-cells = <1>;
#size-cells = <0>;
mipi_in_vopb: endpoint@0 {
reg = <0>;
remote-endpoint = <&vopb_out_mipi>;
};
mipi_in_vopl: endpoint@1 {
reg = <1>;
remote-endpoint = <&vopl_out_mipi>;
};
};
mipi_out: port@1 {
reg = <1>;
#address-cells = <1>;
#size-cells = <0>;
mipi_out_panel: endpoint {
remote-endpoint = <&panel_in_mipi>;
};
};
};
panel {
compatible ="boe,tv080wum-nl0";
reg = <0>;
enable-gpios = <&gpio7 3 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&lcd_en>;
backlight = <&backlight>;
port {
panel_in_mipi: endpoint {
remote-endpoint = <&mipi_out_panel>;
};
};
};
};


@ -0,0 +1,103 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/rockchip/rockchip,analogix-dp.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Rockchip specific extensions to the Analogix Display Port
maintainers:
- Sandy Huang <hjc@rock-chips.com>
- Heiko Stuebner <heiko@sntech.de>
properties:
compatible:
enum:
- rockchip,rk3288-dp
- rockchip,rk3399-edp
clocks:
minItems: 2
maxItems: 3
clock-names:
minItems: 2
items:
- const: dp
- const: pclk
- const: grf
power-domains:
maxItems: 1
resets:
maxItems: 1
reset-names:
const: dp
rockchip,grf:
$ref: /schemas/types.yaml#/definitions/phandle
description:
This SoC makes use of GRF regs.
required:
- compatible
- clocks
- clock-names
- resets
- reset-names
- rockchip,grf
allOf:
- $ref: /schemas/display/bridge/analogix,dp.yaml#
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/rk3288-cru.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
dp@ff970000 {
compatible = "rockchip,rk3288-dp";
reg = <0xff970000 0x4000>;
interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_EDP>, <&cru PCLK_EDP_CTRL>;
clock-names = "dp", "pclk";
phys = <&dp_phy>;
phy-names = "dp";
resets = <&cru 111>;
reset-names = "dp";
rockchip,grf = <&grf>;
pinctrl-0 = <&edp_hpd>;
pinctrl-names = "default";
ports {
#address-cells = <1>;
#size-cells = <0>;
edp_in: port@0 {
reg = <0>;
#address-cells = <1>;
#size-cells = <0>;
edp_in_vopb: endpoint@0 {
reg = <0>;
remote-endpoint = <&vopb_out_edp>;
};
edp_in_vopl: endpoint@1 {
reg = <1>;
remote-endpoint = <&vopl_out_edp>;
};
};
edp_out: port@1 {
reg = <1>;
edp_out_panel: endpoint {
remote-endpoint = <&panel_in_edp>;
};
};
};
};


@ -0,0 +1,166 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/rockchip/rockchip,dw-mipi-dsi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Rockchip specific extensions to the Synopsys Designware MIPI DSI
maintainers:
- Sandy Huang <hjc@rock-chips.com>
- Heiko Stuebner <heiko@sntech.de>
properties:
compatible:
items:
- enum:
- rockchip,px30-mipi-dsi
- rockchip,rk3288-mipi-dsi
- rockchip,rk3399-mipi-dsi
- rockchip,rk3568-mipi-dsi
- const: snps,dw-mipi-dsi
interrupts:
maxItems: 1
clocks:
minItems: 1
maxItems: 4
clock-names:
oneOf:
- minItems: 2
items:
- const: ref
- const: pclk
- const: phy_cfg
- const: grf
- const: pclk
rockchip,grf:
$ref: /schemas/types.yaml#/definitions/phandle
description:
This SoC uses GRF regs to switch between vopl/vopb.
phys:
maxItems: 1
phy-names:
const: dphy
"#phy-cells":
const: 0
description:
Defined when in use as ISP phy.
power-domains:
maxItems: 1
"#address-cells":
const: 1
"#size-cells":
const: 0
required:
- compatible
- clocks
- clock-names
- rockchip,grf
allOf:
- $ref: /schemas/display/bridge/snps,dw-mipi-dsi.yaml#
- if:
properties:
compatible:
contains:
enum:
- rockchip,px30-mipi-dsi
- rockchip,rk3568-mipi-dsi
then:
properties:
clocks:
maxItems: 1
clock-names:
maxItems: 1
required:
- phys
- phy-names
- if:
properties:
compatible:
contains:
const: rockchip,rk3288-mipi-dsi
then:
properties:
clocks:
maxItems: 2
clock-names:
maxItems: 2
- if:
properties:
compatible:
contains:
const: rockchip,rk3399-mipi-dsi
then:
properties:
clocks:
minItems: 4
clock-names:
minItems: 4
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/rk3288-cru.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
mipi_dsi: dsi@ff960000 {
compatible = "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi";
reg = <0xff960000 0x4000>;
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_MIPIDSI_24M>, <&cru PCLK_MIPI_DSI0>;
clock-names = "ref", "pclk";
resets = <&cru SRST_MIPIDSI0>;
reset-names = "apb";
rockchip,grf = <&grf>;
ports {
#address-cells = <1>;
#size-cells = <0>;
mipi_in: port@0 {
reg = <0>;
#address-cells = <1>;
#size-cells = <0>;
mipi_in_vopb: endpoint@0 {
reg = <0>;
remote-endpoint = <&vopb_out_mipi>;
};
mipi_in_vopl: endpoint@1 {
reg = <1>;
remote-endpoint = <&vopl_out_mipi>;
};
};
mipi_out: port@1 {
reg = <1>;
mipi_out_panel: endpoint {
remote-endpoint = <&panel_in_mipi>;
};
};
};
};


@ -0,0 +1,170 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/rockchip/rockchip,lvds.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Rockchip low-voltage differential signal (LVDS) transmitter
maintainers:
- Sandy Huang <hjc@rock-chips.com>
- Heiko Stuebner <heiko@sntech.de>
properties:
compatible:
enum:
- rockchip,px30-lvds
- rockchip,rk3288-lvds
reg:
maxItems: 1
clocks:
maxItems: 1
clock-names:
const: pclk_lvds
avdd1v0-supply:
description: 1.0V analog power.
avdd1v8-supply:
description: 1.8V analog power.
avdd3v3-supply:
description: 3.3V analog power.
rockchip,grf:
$ref: /schemas/types.yaml#/definitions/phandle
description: Phandle to the general register files syscon.
rockchip,output:
$ref: /schemas/types.yaml#/definitions/string
enum: [rgb, lvds, duallvds]
description: This describes the output interface.
phys:
maxItems: 1
phy-names:
const: dphy
pinctrl-names:
const: lcdc
pinctrl-0: true
power-domains:
maxItems: 1
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description:
Video port 0 for the VOP input.
The remote endpoint maybe vopb or vopl.
port@1:
$ref: /schemas/graph.yaml#/properties/port
description:
Video port 1 for either a panel or subsequent encoder.
required:
- port@0
- port@1
required:
- compatible
- rockchip,grf
- rockchip,output
- ports
allOf:
- if:
properties:
compatible:
contains:
const: rockchip,px30-lvds
then:
properties:
reg: false
clocks: false
clock-names: false
avdd1v0-supply: false
avdd1v8-supply: false
avdd3v3-supply: false
required:
- phys
- phy-names
- if:
properties:
compatible:
contains:
const: rockchip,rk3288-lvds
then:
properties:
phys: false
phy-names: false
required:
- reg
- clocks
- clock-names
- avdd1v0-supply
- avdd1v8-supply
- avdd3v3-supply
additionalProperties: false
examples:
- |
#include <dt-bindings/clock/rk3288-cru.h>
lvds: lvds@ff96c000 {
compatible = "rockchip,rk3288-lvds";
reg = <0xff96c000 0x4000>;
clocks = <&cru PCLK_LVDS_PHY>;
clock-names = "pclk_lvds";
avdd1v0-supply = <&vdd10_lcd>;
avdd1v8-supply = <&vcc18_lcd>;
avdd3v3-supply = <&vcca_33>;
pinctrl-names = "lcdc";
pinctrl-0 = <&lcdc_ctl>;
rockchip,grf = <&grf>;
rockchip,output = "rgb";
ports {
#address-cells = <1>;
#size-cells = <0>;
lvds_in: port@0 {
reg = <0>;
#address-cells = <1>;
#size-cells = <0>;
lvds_in_vopb: endpoint@0 {
reg = <0>;
remote-endpoint = <&vopb_out_lvds>;
};
lvds_in_vopl: endpoint@1 {
reg = <1>;
remote-endpoint = <&vopl_out_lvds>;
};
};
lvds_out: port@1 {
reg = <1>;
lvds_out_panel: endpoint {
remote-endpoint = <&panel_in_lvds>;
};
};
};
};


@ -1,92 +0,0 @@
Rockchip RK3288 LVDS interface
================================
Required properties:
- compatible: matching the soc type, one of
- "rockchip,rk3288-lvds";
- "rockchip,px30-lvds";
- reg: physical base address of the controller and length
of memory mapped region.
- clocks: must include clock specifiers corresponding to entries in the
clock-names property.
- clock-names: must contain "pclk_lvds"
- avdd1v0-supply: regulator phandle for 1.0V analog power
- avdd1v8-supply: regulator phandle for 1.8V analog power
- avdd3v3-supply: regulator phandle for 3.3V analog power
- rockchip,grf: phandle to the general register files syscon
- rockchip,output: "rgb", "lvds" or "duallvds", This describes the output interface
- phys: LVDS/DSI DPHY (px30 only)
- phy-names: name of the PHY, must be "dphy" (px30 only)
Optional properties:
- pinctrl-names: must contain a "lcdc" entry.
- pinctrl-0: pin control group to be used for this controller.
Required nodes:
The lvds has two video ports as described by
Documentation/devicetree/bindings/media/video-interfaces.txt
Their connections are modeled using the OF graph bindings specified in
Documentation/devicetree/bindings/graph.txt.
- video port 0 for the VOP input, the remote endpoint maybe vopb or vopl
- video port 1 for either a panel or subsequent encoder
Example:
lvds_panel: lvds-panel {
compatible = "auo,b101ean01";
enable-gpios = <&gpio7 21 GPIO_ACTIVE_HIGH>;
data-mapping = "jeida-24";
ports {
panel_in_lvds: endpoint {
remote-endpoint = <&lvds_out_panel>;
};
};
};
For Rockchip RK3288:
lvds: lvds@ff96c000 {
compatible = "rockchip,rk3288-lvds";
rockchip,grf = <&grf>;
reg = <0xff96c000 0x4000>;
clocks = <&cru PCLK_LVDS_PHY>;
clock-names = "pclk_lvds";
pinctrl-names = "lcdc";
pinctrl-0 = <&lcdc_ctl>;
avdd1v0-supply = <&vdd10_lcd>;
avdd1v8-supply = <&vcc18_lcd>;
avdd3v3-supply = <&vcca_33>;
rockchip,output = "rgb";
ports {
#address-cells = <1>;
#size-cells = <0>;
lvds_in: port@0 {
reg = <0>;
lvds_in_vopb: endpoint@0 {
reg = <0>;
remote-endpoint = <&vopb_out_lvds>;
};
lvds_in_vopl: endpoint@1 {
reg = <1>;
remote-endpoint = <&vopl_out_lvds>;
};
};
lvds_out: port@1 {
reg = <1>;
lvds_out_panel: endpoint {
remote-endpoint = <&panel_in_lvds>;
};
};
};
};


@ -26,6 +26,11 @@ description: |+
over control to a driver for the real hardware. The bindings for the
hw nodes must specify which node is considered the primary node.
If a panel node is given, then the driver uses this to configure the
physical width and height of the display. If no panel node is given,
then the driver uses the width and height properties of the simplefb
node to estimate it.
It is advised to add display# aliases to help the OS determine how
to number things. If display# aliases are used, then if the simplefb
node contains a display property then the /aliases/display# path
@ -117,6 +122,10 @@ properties:
$ref: /schemas/types.yaml#/definitions/phandle
description: Primary display hardware node
panel:
$ref: /schemas/types.yaml#/definitions/phandle
description: Display panel node
allwinner,pipeline:
description: Pipeline used by the framebuffer on Allwinner SoCs
enum:


@ -38,7 +38,7 @@ properties:
description: The number of cells in a MIPI calibration specifier.
Should be 1. The single cell specifies a bitmask of the pads that
need to be calibrated for a given device.
$ref: "/schemas/types.yaml#/definitions/uint32"
$ref: /schemas/types.yaml#/definitions/uint32
const: 1
additionalProperties: false


@ -69,12 +69,12 @@ properties:
# Tegra186 and later
nvidia,interface:
description: index of the SOR interface
$ref: "/schemas/types.yaml#/definitions/uint32"
$ref: /schemas/types.yaml#/definitions/uint32
nvidia,ddc-i2c-bus:
description: phandle of an I2C controller used for DDC EDID
probing
$ref: "/schemas/types.yaml#/definitions/phandle"
$ref: /schemas/types.yaml#/definitions/phandle
nvidia,hpd-gpio:
description: specifies a GPIO used for hotplug detection
@ -82,23 +82,23 @@ properties:
nvidia,edid:
description: supplies a binary EDID blob
$ref: "/schemas/types.yaml#/definitions/uint8-array"
$ref: /schemas/types.yaml#/definitions/uint8-array
nvidia,panel:
description: phandle of a display panel, required for eDP
$ref: "/schemas/types.yaml#/definitions/phandle"
$ref: /schemas/types.yaml#/definitions/phandle
nvidia,xbar-cfg:
description: 5 cells containing the crossbar configuration.
Each lane of the SOR, identified by the cell's index, is
mapped via the crossbar to the pad specified by the cell's
value.
$ref: "/schemas/types.yaml#/definitions/uint32-array"
$ref: /schemas/types.yaml#/definitions/uint32-array
# optional when driving an eDP output
nvidia,dpaux:
description: phandle to a DisplayPort AUX interface
$ref: "/schemas/types.yaml#/definitions/phandle"
$ref: /schemas/types.yaml#/definitions/phandle
allOf:
- if:


@ -60,13 +60,13 @@ properties:
nvidia,outputs:
description: A list of phandles of outputs that this display
controller can drive.
$ref: "/schemas/types.yaml#/definitions/phandle-array"
$ref: /schemas/types.yaml#/definitions/phandle-array
nvidia,head:
description: The number of the display controller head. This
is used to setup the various types of output to receive
video data from the given head.
$ref: "/schemas/types.yaml#/definitions/uint32"
$ref: /schemas/types.yaml#/definitions/uint32
additionalProperties: false


@ -29,7 +29,7 @@ properties:
- const: dsi
allOf:
- $ref: "/schemas/reset/reset.yaml"
- $ref: /schemas/reset/reset.yaml
additionalProperties: false


@ -59,12 +59,12 @@ properties:
description: Should contain a phandle and a specifier specifying
which pads are used by this DSI output and need to be
calibrated. See nvidia,tegra114-mipi.yaml for details.
$ref: "/schemas/types.yaml#/definitions/phandle-array"
$ref: /schemas/types.yaml#/definitions/phandle-array
nvidia,ddc-i2c-bus:
description: phandle of an I2C controller used for DDC EDID
probing
$ref: "/schemas/types.yaml#/definitions/phandle"
$ref: /schemas/types.yaml#/definitions/phandle
nvidia,hpd-gpio:
description: specifies a GPIO used for hotplug detection
@ -72,19 +72,19 @@ properties:
nvidia,edid:
description: supplies a binary EDID blob
$ref: "/schemas/types.yaml#/definitions/uint8-array"
$ref: /schemas/types.yaml#/definitions/uint8-array
nvidia,panel:
description: phandle of a display panel
$ref: "/schemas/types.yaml#/definitions/phandle"
$ref: /schemas/types.yaml#/definitions/phandle
nvidia,ganged-mode:
description: contains a phandle to a second DSI controller to
gang up with in order to support up to 8 data lanes
$ref: "/schemas/types.yaml#/definitions/phandle"
$ref: /schemas/types.yaml#/definitions/phandle
allOf:
- $ref: "../dsi-controller.yaml#"
- $ref: ../dsi-controller.yaml#
- if:
properties:
compatible:


@ -68,7 +68,7 @@ properties:
nvidia,ddc-i2c-bus:
description: phandle of an I2C controller used for DDC EDID
probing
$ref: "/schemas/types.yaml#/definitions/phandle"
$ref: /schemas/types.yaml#/definitions/phandle
nvidia,hpd-gpio:
description: specifies a GPIO used for hotplug detection
@ -76,11 +76,11 @@ properties:
nvidia,edid:
description: supplies a binary EDID blob
$ref: "/schemas/types.yaml#/definitions/uint8-array"
$ref: /schemas/types.yaml#/definitions/uint8-array
nvidia,panel:
description: phandle of a display panel
$ref: "/schemas/types.yaml#/definitions/phandle"
$ref: /schemas/types.yaml#/definitions/phandle
"#sound-dai-cells":
const: 0


@ -2,8 +2,8 @@
# Copyright 2019 Texas Instruments Incorporated
%YAML 1.2
---
$id: "http://devicetree.org/schemas/display/ti/ti,am65x-dss.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
$id: http://devicetree.org/schemas/display/ti/ti,am65x-dss.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Texas Instruments AM65x Display Subsystem
@ -88,7 +88,7 @@ properties:
The DSS DPI output port node from video port 2
ti,am65x-oldi-io-ctrl:
$ref: "/schemas/types.yaml#/definitions/phandle"
$ref: /schemas/types.yaml#/definitions/phandle
description:
phandle to syscon device node mapping OLDI IO_CTRL registers.
The mapped range should point to OLDI_DAT0_IO_CTRL, map it and


@ -2,8 +2,8 @@
# Copyright 2019 Texas Instruments Incorporated
%YAML 1.2
---
$id: "http://devicetree.org/schemas/display/ti/ti,j721e-dss.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
$id: http://devicetree.org/schemas/display/ti/ti,j721e-dss.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Texas Instruments J721E Display Subsystem


@ -2,8 +2,8 @@
# Copyright 2019 Texas Instruments Incorporated
%YAML 1.2
---
$id: "http://devicetree.org/schemas/display/ti/ti,k2g-dss.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
$id: http://devicetree.org/schemas/display/ti/ti,k2g-dss.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Texas Instruments K2G Display Subsystem


@ -2,8 +2,8 @@
# Copyright 2019 Bootlin
%YAML 1.2
---
$id: "http://devicetree.org/schemas/display/xylon,logicvc-display.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
$id: http://devicetree.org/schemas/display/xylon,logicvc-display.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Xylon LogiCVC display controller
@ -89,25 +89,25 @@ properties:
description: Display output colorspace (C_DISPLAY_COLOR_SPACE).
xylon,display-depth:
$ref: "/schemas/types.yaml#/definitions/uint32"
$ref: /schemas/types.yaml#/definitions/uint32
description: Display output depth (C_PIXEL_DATA_WIDTH).
xylon,row-stride:
$ref: "/schemas/types.yaml#/definitions/uint32"
$ref: /schemas/types.yaml#/definitions/uint32
description: Fixed number of pixels in a framebuffer row (C_ROW_STRIDE).
xylon,dithering:
$ref: "/schemas/types.yaml#/definitions/flag"
$ref: /schemas/types.yaml#/definitions/flag
description: Dithering module is enabled (C_XCOLOR)
xylon,background-layer:
$ref: "/schemas/types.yaml#/definitions/flag"
$ref: /schemas/types.yaml#/definitions/flag
description: |
The last layer is used to display a black background (C_USE_BACKGROUND).
The layer must still be registered.
xylon,layers-configurable:
$ref: "/schemas/types.yaml#/definitions/flag"
$ref: /schemas/types.yaml#/definitions/flag
description: |
Configuration of layers' size, position and offset is enabled
(C_USE_SIZE_POSITION).
@ -131,7 +131,7 @@ properties:
maxItems: 1
xylon,layer-depth:
$ref: "/schemas/types.yaml#/definitions/uint32"
$ref: /schemas/types.yaml#/definitions/uint32
description: Layer depth (C_LAYER_X_DATA_WIDTH).
xylon,layer-colorspace:
@ -151,19 +151,19 @@ properties:
description: Alpha mode for the layer (C_LAYER_X_ALPHA_MODE).
xylon,layer-base-offset:
$ref: "/schemas/types.yaml#/definitions/uint32"
$ref: /schemas/types.yaml#/definitions/uint32
description: |
Offset in number of lines (C_LAYER_X_OFFSET) starting from the
video RAM base (C_VMEM_BASEADDR), only for version 3.
xylon,layer-buffer-offset:
$ref: "/schemas/types.yaml#/definitions/uint32"
$ref: /schemas/types.yaml#/definitions/uint32
description: |
Offset in number of lines (C_BUFFER_*_OFFSET) starting from the
layer base offset for the second buffer used in double-buffering.
xylon,layer-primary:
$ref: "/schemas/types.yaml#/definitions/flag"
$ref: /schemas/types.yaml#/definitions/flag
description: |
Layer should be registered as a primary plane (exactly one is
required).


@ -19,12 +19,19 @@ properties:
- enum:
- amlogic,meson-g12a-mali
- mediatek,mt8183-mali
- mediatek,mt8183b-mali
- mediatek,mt8186-mali
- realtek,rtd1619-mali
- renesas,r9a07g044-mali
- renesas,r9a07g054-mali
- rockchip,px30-mali
- rockchip,rk3568-mali
- const: arm,mali-bifrost # Mali Bifrost GPU model/revision is fully discoverable
- items:
- enum:
- mediatek,mt8195-mali
- const: mediatek,mt8192-mali
- const: arm,mali-valhall-jm # Mali Valhall GPU model/revision is fully discoverable
- items:
- enum:
- mediatek,mt8192-mali
@ -63,7 +70,11 @@ properties:
power-domains:
minItems: 1
maxItems: 3
maxItems: 5
power-domain-names:
minItems: 2
maxItems: 5
resets:
minItems: 1
@ -93,6 +104,13 @@ properties:
dma-coherent: true
nvmem-cell-names:
items:
- const: speed-bin
nvmem-cells:
maxItems: 1
required:
- compatible
- reg
@ -109,6 +127,10 @@ allOf:
contains:
const: amlogic,meson-g12a-mali
then:
properties:
power-domains:
maxItems: 1
power-domain-names: false
required:
- resets
- if:
@ -131,6 +153,9 @@ allOf:
- const: gpu
- const: bus
- const: bus_ace
power-domains:
maxItems: 1
power-domain-names: false
resets:
minItems: 3
reset-names:
@ -152,6 +177,7 @@ allOf:
properties:
power-domains:
minItems: 3
maxItems: 3
power-domain-names:
items:
- const: core0
@ -164,9 +190,61 @@ allOf:
- power-domain-names
else:
properties:
power-domains:
maxItems: 1
sram-supply: false
- if:
properties:
compatible:
contains:
const: mediatek,mt8183b-mali
then:
properties:
power-domains:
minItems: 3
maxItems: 3
power-domain-names:
items:
- const: core0
- const: core1
- const: core2
required:
- power-domains
- power-domain-names
- if:
properties:
compatible:
contains:
const: mediatek,mt8186-mali
then:
properties:
power-domains:
minItems: 2
maxItems: 2
power-domain-names:
items:
- const: core0
- const: core1
required:
- power-domains
- power-domain-names
- if:
properties:
compatible:
contains:
const: mediatek,mt8192-mali
then:
properties:
power-domains:
minItems: 5
power-domain-names:
items:
- const: core0
- const: core1
- const: core2
- const: core3
- const: core4
required:
- power-domains
- power-domain-names
- if:
properties:
compatible:
@ -180,6 +258,9 @@ allOf:
items:
- const: gpu
- const: bus
power-domains:
maxItems: 1
power-domain-names: false
required:
- clock-names


@ -80,13 +80,17 @@ allOf:
properties:
compatible:
contains:
const: rockchip,px30-grf
enum:
- rockchip,px30-grf
then:
properties:
lvds:
description:
Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt
type: object
$ref: /schemas/display/rockchip/rockchip,lvds.yaml#
unevaluatedProperties: false
- if:
properties:


@ -164,6 +164,12 @@ DMA Fence Signalling Annotations
.. kernel-doc:: drivers/dma-buf/dma-fence.c
:doc: fence signalling annotation
DMA Fence Deadline Hints
~~~~~~~~~~~~~~~~~~~~~~~~
.. kernel-doc:: drivers/dma-buf/dma-fence.c
:doc: deadline hints
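As a rough illustration of the new hint from the waiter's side (a sketch only,
not part of this patch; it assumes the dma_fence_set_deadline() and
dma_fence_wait_timeout() kernel APIs):

#include <linux/dma-fence.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>

/* Hint to the signaler that the fence is needed by 'deadline' (e.g. the next
 * vblank) so it can boost clocks, then wait interruptibly for up to 100 ms.
 * The hint is best-effort: fences without a set_deadline op ignore it. */
static long wait_fence_with_deadline(struct dma_fence *fence, ktime_t deadline)
{
	dma_fence_set_deadline(fence, deadline);
	return dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
}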
DMA Fences Functions Reference
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -197,8 +203,8 @@ DMA Fence unwrap
.. kernel-doc:: include/linux/dma-fence-unwrap.h
:internal:
DMA Fence uABI/Sync File
~~~~~~~~~~~~~~~~~~~~~~~~
DMA Fence Sync File
~~~~~~~~~~~~~~~~~~~
.. kernel-doc:: drivers/dma-buf/sync_file.c
:export:
@ -206,6 +212,12 @@ DMA Fence uABI/Sync File
.. kernel-doc:: include/linux/sync_file.h
:internal:
DMA Fence Sync File uABI
~~~~~~~~~~~~~~~~~~~~~~~~
.. kernel-doc:: include/uapi/linux/sync_file.h
:internal:
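For orientation, a minimal userspace sketch of the sync_file uABI documented
above (illustrative only, not part of this patch; it uses the existing
SYNC_IOC_FILE_INFO ioctl):

#include <linux/sync_file.h>
#include <sys/ioctl.h>

/* Return 1 if the sync_file fd has signaled, 0 if it is still pending,
 * a negative fence error reported by the kernel, or -1 if the ioctl fails. */
static int sync_file_status(int fd)
{
	struct sync_file_info info = { 0 };

	if (ioctl(fd, SYNC_IOC_FILE_INFO, &info) < 0)
		return -1;

	return info.status;
}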
Indefinite DMA Fences
~~~~~~~~~~~~~~~~~~~~~


@ -173,7 +173,7 @@ The alpha blending equation is configured from DRM to DC interface by the
following path:
1. When updating a :c:type:`drm_plane_state <drm_plane_state>`, DM calls
:c:type:`fill_blending_from_plane_state()` that maps
:c:type:`amdgpu_dm_plane_fill_blending_from_plane_state()` that maps
:c:type:`drm_plane_state <drm_plane_state>` attributes to
:c:type:`dc_plane_info <dc_plane_info>` struct to be handled in the
OS-agnostic component (DC).


@ -6521,6 +6521,7 @@ L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: freedreno@lists.freedesktop.org
S: Maintained
B: https://gitlab.freedesktop.org/drm/msm/-/issues
T: git https://gitlab.freedesktop.org/drm/msm.git
F: Documentation/devicetree/bindings/display/msm/
F: drivers/gpu/drm/msm/
@ -6540,6 +6541,13 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt35560.c
DRM DRIVER FOR NOVATEK NT36523 PANELS
M: Jianhua Lu <lujianhua000@gmail.com>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt36523.c
DRM DRIVER FOR NOVATEK NT36672A PANELS
M: Sumit Semwal <sumit.semwal@linaro.org>
S: Maintained
@ -6620,6 +6628,16 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
F: drivers/gpu/drm/panel/panel-samsung-db7430.c
DRM DRIVER FOR SAMSUNG MIPI DSIM BRIDGE
M: Inki Dae <inki.dae@samsung.com>
M: Jagan Teki <jagan@amarulasolutions.com>
M: Marek Szyprowski <m.szyprowski@samsung.com>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/bridge/samsung,mipi-dsim.yaml
F: drivers/gpu/drm/bridge/samsung-dsim.c
F: include/drm/bridge/samsung-dsim.h
DRM DRIVER FOR SAMSUNG S6D27A1 PANELS
M: Markuss Broks <markuss.broks@gmail.com>
S: Maintained
@ -6827,6 +6845,7 @@ S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/bridge/
F: drivers/gpu/drm/bridge/
F: include/drm/drm_bridge.h
DRM DRIVERS FOR EXYNOS
M: Inki Dae <inki.dae@samsung.com>
@ -6919,6 +6938,7 @@ F: drivers/phy/mediatek/phy-mtk-mipi*
DRM DRIVERS FOR NVIDIA TEGRA
M: Thierry Reding <thierry.reding@gmail.com>
M: Mikko Perttunen <mperttunen@nvidia.com>
L: dri-devel@lists.freedesktop.org
L: linux-tegra@vger.kernel.org
S: Supported
@ -7047,7 +7067,7 @@ F: Documentation/devicetree/bindings/display/xlnx/
F: drivers/gpu/drm/xlnx/
DRM PANEL DRIVERS
M: Thierry Reding <thierry.reding@gmail.com>
M: Neil Armstrong <neil.armstrong@linaro.org>
R: Sam Ravnborg <sam@ravnborg.org>
L: dri-devel@lists.freedesktop.org
S: Maintained
@ -17255,6 +17275,16 @@ F: Documentation/devicetree/bindings/clock/qcom,*
F: drivers/clk/qcom/
F: include/dt-bindings/clock/qcom,*
QUALCOMM CLOUD AI (QAIC) DRIVER
M: Jeffrey Hugo <quic_jhugo@quicinc.com>
L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/accel/qaic/
F: drivers/accel/qaic/
F: include/uapi/drm/qaic_accel.h
QUALCOMM CORE POWER REDUCTION (CPR) AVS DRIVER
M: Bjorn Andersson <andersson@kernel.org>
M: Konrad Dybcio <konrad.dybcio@linaro.org>


@ -26,5 +26,6 @@ menuconfig DRM_ACCEL
source "drivers/accel/habanalabs/Kconfig"
source "drivers/accel/ivpu/Kconfig"
source "drivers/accel/qaic/Kconfig"
endif


@ -2,3 +2,4 @@
obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/
obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/
obj-$(CONFIG_DRM_ACCEL_QAIC) += qaic/


@ -45,20 +45,29 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
}
mutex_lock(&hdev->mmu_lock);
rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size);
if (rc) {
dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
goto err_va_umap;
goto err_va_pool_free;
}
rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV);
if (rc)
goto err_mmu_unmap;
mutex_unlock(&hdev->mmu_lock);
cb->is_mmu_mapped = true;
return rc;
err_va_umap:
return 0;
err_mmu_unmap:
hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
err_va_pool_free:
mutex_unlock(&hdev->mmu_lock);
gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
return rc;
}
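The relabelled error path above restores the usual reverse-order unwind: each label undoes exactly the steps that succeeded before the failing call. A generic sketch of the pattern, with purely illustrative helper names (do_a/do_b/do_c and the undo_* helpers are hypothetical, not from the driver):

/* Sketch of reverse-order unwind: undo B, then A, when a later step fails. */
static int example_setup(struct device *dev)
{
        int rc;

        rc = do_a(dev);
        if (rc)
                return rc;

        rc = do_b(dev);
        if (rc)
                goto err_undo_a;

        rc = do_c(dev);
        if (rc)
                goto err_undo_b;

        return 0;

err_undo_b:
        undo_b(dev);
err_undo_a:
        undo_a(dev);
        return rc;
}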


@ -14,10 +14,10 @@
#define HL_CS_FLAGS_TYPE_MASK (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
HL_CS_FLAGS_COLLECTIVE_WAIT | HL_CS_FLAGS_RESERVE_SIGNALS_ONLY | \
HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND | \
HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
HL_CS_FLAGS_ENGINES_COMMAND | HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
#define MAX_TS_ITER_NUM 10
#define MAX_TS_ITER_NUM 100
/**
* enum hl_cs_wait_status - cs wait status
@ -657,7 +657,7 @@ static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
/*
* we get refcount upon reservation of signals or signal/wait cs for the
* hw_sob object, and need to put it when the first staged cs
* (which cotains the encaps signals) or cs signal/wait is completed.
* (which contains the encaps signals) or cs signal/wait is completed.
*/
if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
(hl_cs_cmpl->type == CS_TYPE_WAIT) ||
@ -1082,9 +1082,8 @@ static void
wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
{
struct hl_user_pending_interrupt *pend, *temp;
unsigned long flags;
spin_lock_irqsave(&interrupt->wait_list_lock, flags);
spin_lock(&interrupt->wait_list_lock);
list_for_each_entry_safe(pend, temp, &interrupt->wait_list_head, wait_list_node) {
if (pend->ts_reg_info.buf) {
list_del(&pend->wait_list_node);
@ -1095,7 +1094,7 @@ wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
complete_all(&pend->fence.completion);
}
}
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
spin_unlock(&interrupt->wait_list_lock);
}
void hl_release_pending_user_interrupts(struct hl_device *hdev)
@ -1168,6 +1167,22 @@ static void cs_completion(struct work_struct *work)
hl_complete_job(hdev, job);
}
u32 hl_get_active_cs_num(struct hl_device *hdev)
{
u32 active_cs_num = 0;
struct hl_cs *cs;
spin_lock(&hdev->cs_mirror_lock);
list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node)
if (!cs->completed)
active_cs_num++;
spin_unlock(&hdev->cs_mirror_lock);
return active_cs_num;
}
static int validate_queue_index(struct hl_device *hdev,
struct hl_cs_chunk *chunk,
enum hl_queue_type *queue_type,
@ -1304,6 +1319,8 @@ static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
return CS_UNRESERVE_SIGNALS;
else if (cs_type_flags & HL_CS_FLAGS_ENGINE_CORE_COMMAND)
return CS_TYPE_ENGINE_CORE;
else if (cs_type_flags & HL_CS_FLAGS_ENGINES_COMMAND)
return CS_TYPE_ENGINES;
else if (cs_type_flags & HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
return CS_TYPE_FLUSH_PCI_HBW_WRITES;
else
@ -2429,10 +2446,13 @@ out:
static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores,
u32 num_engine_cores, u32 core_command)
{
int rc;
struct hl_device *hdev = hpriv->hdev;
void __user *engine_cores_arr;
u32 *cores;
int rc;
if (!hdev->asic_prop.supports_engine_modes)
return -EPERM;
if (!num_engine_cores || num_engine_cores > hdev->asic_prop.num_engine_cores) {
dev_err(hdev->dev, "Number of engine cores %d is invalid\n", num_engine_cores);
@ -2461,6 +2481,48 @@ static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores,
return rc;
}
static int cs_ioctl_engines(struct hl_fpriv *hpriv, u64 engines_arr_user_addr,
u32 num_engines, enum hl_engine_command command)
{
struct hl_device *hdev = hpriv->hdev;
u32 *engines, max_num_of_engines;
void __user *engines_arr;
int rc;
if (!hdev->asic_prop.supports_engine_modes)
return -EPERM;
if (command >= HL_ENGINE_COMMAND_MAX) {
dev_err(hdev->dev, "Engine command is invalid\n");
return -EINVAL;
}
max_num_of_engines = hdev->asic_prop.max_num_of_engines;
if (command == HL_ENGINE_CORE_RUN || command == HL_ENGINE_CORE_HALT)
max_num_of_engines = hdev->asic_prop.num_engine_cores;
if (!num_engines || num_engines > max_num_of_engines) {
dev_err(hdev->dev, "Number of engines %d is invalid\n", num_engines);
return -EINVAL;
}
engines_arr = (void __user *) (uintptr_t) engines_arr_user_addr;
engines = kmalloc_array(num_engines, sizeof(u32), GFP_KERNEL);
if (!engines)
return -ENOMEM;
if (copy_from_user(engines, engines_arr, num_engines * sizeof(u32))) {
dev_err(hdev->dev, "Failed to copy engine-ids array from user\n");
kfree(engines);
return -EFAULT;
}
rc = hdev->asic_funcs->set_engines(hdev, engines, num_engines, command);
kfree(engines);
return rc;
}
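cs_ioctl_engines() hands the validated array to the new ->set_engines() ASIC hook, whose prototype is added to hl_asic_funcs further down in this series. A purely illustrative backend might look like the sketch below; only the prototype matches the header change, the body is hypothetical:

/* Hypothetical ->set_engines() backend that merely logs the request. */
static int example_set_engines(struct hl_device *hdev, u32 *engine_ids,
                               u32 num_engines, u32 engine_command)
{
        u32 i;

        for (i = 0 ; i < num_engines ; i++)
                dev_dbg(hdev->dev, "engine command %u for engine id %u\n",
                        engine_command, engine_ids[i]);

        return 0;
}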
static int cs_ioctl_flush_pci_hbw_writes(struct hl_fpriv *hpriv)
{
struct hl_device *hdev = hpriv->hdev;
@ -2532,6 +2594,10 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
rc = cs_ioctl_engine_cores(hpriv, args->in.engine_cores,
args->in.num_engine_cores, args->in.core_command);
break;
case CS_TYPE_ENGINES:
rc = cs_ioctl_engines(hpriv, args->in.engines,
args->in.num_engines, args->in.engine_command);
break;
case CS_TYPE_FLUSH_PCI_HBW_WRITES:
rc = cs_ioctl_flush_pci_hbw_writes(hpriv);
break;
@ -3143,8 +3209,9 @@ static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf,
struct hl_user_pending_interrupt *cb_last =
(struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
(ts_buff->kernel_buff_size / sizeof(struct hl_user_pending_interrupt));
unsigned long flags, iter_counter = 0;
unsigned long iter_counter = 0;
u64 current_cq_counter;
ktime_t timestamp;
/* Validate ts_offset not exceeding last max */
if (requested_offset_record >= cb_last) {
@ -3153,8 +3220,10 @@ static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf,
return -EINVAL;
}
timestamp = ktime_get();
start_over:
spin_lock_irqsave(wait_list_lock, flags);
spin_lock(wait_list_lock);
/* Unregister only if we didn't reach the target value
* since in this case there will be no handling in irq context
@ -3165,7 +3234,7 @@ start_over:
current_cq_counter = *requested_offset_record->cq_kernel_addr;
if (current_cq_counter < requested_offset_record->cq_target_value) {
list_del(&requested_offset_record->wait_list_node);
spin_unlock_irqrestore(wait_list_lock, flags);
spin_unlock(wait_list_lock);
hl_mmap_mem_buf_put(requested_offset_record->ts_reg_info.buf);
hl_cb_put(requested_offset_record->ts_reg_info.cq_cb);
@ -3176,13 +3245,14 @@ start_over:
dev_dbg(buf->mmg->dev,
"ts node in middle of irq handling\n");
/* irq handling in the middle give it time to finish */
spin_unlock_irqrestore(wait_list_lock, flags);
usleep_range(1, 10);
/* irq thread handling in the middle give it time to finish */
spin_unlock(wait_list_lock);
usleep_range(100, 1000);
if (++iter_counter == MAX_TS_ITER_NUM) {
dev_err(buf->mmg->dev,
"handling registration interrupt took too long!!\n");
return -EINVAL;
"Timestamp offset processing reached timeout of %lld ms\n",
ktime_ms_delta(ktime_get(), timestamp));
return -EAGAIN;
}
goto start_over;
@ -3197,7 +3267,7 @@ start_over:
(u64 *) cq_cb->kernel_address + cq_offset;
requested_offset_record->cq_target_value = target_value;
spin_unlock_irqrestore(wait_list_lock, flags);
spin_unlock(wait_list_lock);
}
*pend = requested_offset_record;
@ -3217,7 +3287,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
struct hl_user_pending_interrupt *pend;
struct hl_mmap_mem_buf *buf;
struct hl_cb *cq_cb;
unsigned long timeout, flags;
unsigned long timeout;
long completion_rc;
int rc = 0;
@ -3264,7 +3334,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
pend->cq_target_value = target_value;
}
spin_lock_irqsave(&interrupt->wait_list_lock, flags);
spin_lock(&interrupt->wait_list_lock);
/* We check for completion value as interrupt could have been received
* before we added the node to the wait list
@ -3272,7 +3342,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
if (*pend->cq_kernel_addr >= target_value) {
if (register_ts_record)
pend->ts_reg_info.in_use = 0;
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
spin_unlock(&interrupt->wait_list_lock);
*status = HL_WAIT_CS_STATUS_COMPLETED;
@ -3284,7 +3354,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
goto set_timestamp;
}
} else if (!timeout_us) {
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
spin_unlock(&interrupt->wait_list_lock);
*status = HL_WAIT_CS_STATUS_BUSY;
pend->fence.timestamp = ktime_get();
goto set_timestamp;
@ -3309,7 +3379,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
pend->ts_reg_info.in_use = 1;
list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
spin_unlock(&interrupt->wait_list_lock);
if (register_ts_record) {
rc = *status = HL_WAIT_CS_STATUS_COMPLETED;
@ -3353,9 +3423,9 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
* for ts record, the node will be deleted in the irq handler after
* we reach the target value.
*/
spin_lock_irqsave(&interrupt->wait_list_lock, flags);
spin_lock(&interrupt->wait_list_lock);
list_del(&pend->wait_list_node);
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
spin_unlock(&interrupt->wait_list_lock);
set_timestamp:
*timestamp = ktime_to_ns(pend->fence.timestamp);
@ -3383,7 +3453,7 @@ static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_
u64 *timestamp)
{
struct hl_user_pending_interrupt *pend;
unsigned long timeout, flags;
unsigned long timeout;
u64 completion_value;
long completion_rc;
int rc = 0;
@ -3403,9 +3473,9 @@ static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_
/* Add pending user interrupt to relevant list for the interrupt
* handler to monitor
*/
spin_lock_irqsave(&interrupt->wait_list_lock, flags);
spin_lock(&interrupt->wait_list_lock);
list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
spin_unlock(&interrupt->wait_list_lock);
/* We check for completion value as interrupt could have been received
* before we added the node to the wait list
@ -3436,14 +3506,14 @@ wait_again:
* If comparison fails, keep waiting until timeout expires
*/
if (completion_rc > 0) {
spin_lock_irqsave(&interrupt->wait_list_lock, flags);
spin_lock(&interrupt->wait_list_lock);
/* reinit_completion must be called before we check for user
* completion value, otherwise, if interrupt is received after
* the comparison and before the next wait_for_completion,
* we will reach timeout and fail
*/
reinit_completion(&pend->fence.completion);
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
spin_unlock(&interrupt->wait_list_lock);
if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
dev_err(hdev->dev, "Failed to copy completion value from user\n");
@ -3480,9 +3550,9 @@ wait_again:
}
remove_pending_user_interrupt:
spin_lock_irqsave(&interrupt->wait_list_lock, flags);
spin_lock(&interrupt->wait_list_lock);
list_del(&pend->wait_list_node);
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
spin_unlock(&interrupt->wait_list_lock);
*timestamp = ktime_to_ns(pend->fence.timestamp);


@ -258,7 +258,7 @@ static int vm_show(struct seq_file *s, void *data)
if (!dev_entry->hdev->mmu_enable)
return 0;
spin_lock(&dev_entry->ctx_mem_hash_spinlock);
mutex_lock(&dev_entry->ctx_mem_hash_mutex);
list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
once = false;
@ -329,7 +329,7 @@ static int vm_show(struct seq_file *s, void *data)
}
spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
ctx = hl_get_compute_ctx(dev_entry->hdev);
if (ctx) {
@ -1583,59 +1583,183 @@ static const struct file_operations hl_debugfs_fops = {
.release = single_release,
};
static void add_secured_nodes(struct hl_dbg_device_entry *dev_entry)
static void add_secured_nodes(struct hl_dbg_device_entry *dev_entry, struct dentry *root)
{
debugfs_create_u8("i2c_bus",
0644,
dev_entry->root,
root,
&dev_entry->i2c_bus);
debugfs_create_u8("i2c_addr",
0644,
dev_entry->root,
root,
&dev_entry->i2c_addr);
debugfs_create_u8("i2c_reg",
0644,
dev_entry->root,
root,
&dev_entry->i2c_reg);
debugfs_create_u8("i2c_len",
0644,
dev_entry->root,
root,
&dev_entry->i2c_len);
debugfs_create_file("i2c_data",
0644,
dev_entry->root,
root,
dev_entry,
&hl_i2c_data_fops);
debugfs_create_file("led0",
0200,
dev_entry->root,
root,
dev_entry,
&hl_led0_fops);
debugfs_create_file("led1",
0200,
dev_entry->root,
root,
dev_entry,
&hl_led1_fops);
debugfs_create_file("led2",
0200,
dev_entry->root,
root,
dev_entry,
&hl_led2_fops);
}
static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_entry *dev_entry,
struct dentry *root)
{
int count = ARRAY_SIZE(hl_debugfs_list);
struct hl_debugfs_entry *entry;
int i;
debugfs_create_x64("memory_scrub_val",
0644,
root,
&hdev->memory_scrub_val);
debugfs_create_file("memory_scrub",
0200,
root,
dev_entry,
&hl_mem_scrub_fops);
debugfs_create_x64("addr",
0644,
root,
&dev_entry->addr);
debugfs_create_file("data32",
0644,
root,
dev_entry,
&hl_data32b_fops);
debugfs_create_file("data64",
0644,
root,
dev_entry,
&hl_data64b_fops);
debugfs_create_file("set_power_state",
0200,
root,
dev_entry,
&hl_power_fops);
debugfs_create_file("device",
0200,
root,
dev_entry,
&hl_device_fops);
debugfs_create_file("clk_gate",
0200,
root,
dev_entry,
&hl_clk_gate_fops);
debugfs_create_file("stop_on_err",
0644,
root,
dev_entry,
&hl_stop_on_err_fops);
debugfs_create_file("dump_security_violations",
0644,
root,
dev_entry,
&hl_security_violations_fops);
debugfs_create_file("dump_razwi_events",
0644,
root,
dev_entry,
&hl_razwi_check_fops);
debugfs_create_file("dma_size",
0200,
root,
dev_entry,
&hl_dma_size_fops);
debugfs_create_blob("data_dma",
0400,
root,
&dev_entry->data_dma_blob_desc);
debugfs_create_file("monitor_dump_trig",
0200,
root,
dev_entry,
&hl_monitor_dump_fops);
debugfs_create_blob("monitor_dump",
0400,
root,
&dev_entry->mon_dump_blob_desc);
debugfs_create_x8("skip_reset_on_timeout",
0644,
root,
&hdev->reset_info.skip_reset_on_timeout);
debugfs_create_file("state_dump",
0600,
root,
dev_entry,
&hl_state_dump_fops);
debugfs_create_file("timeout_locked",
0644,
root,
dev_entry,
&hl_timeout_locked_fops);
debugfs_create_u32("device_release_watchdog_timeout",
0644,
root,
&hdev->device_release_watchdog_timeout_sec);
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
debugfs_create_file(hl_debugfs_list[i].name,
0444,
root,
entry,
&hl_debugfs_fops);
entry->info_ent = &hl_debugfs_list[i];
entry->dev_entry = dev_entry;
}
}
void hl_debugfs_add_device(struct hl_device *hdev)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
int count = ARRAY_SIZE(hl_debugfs_list);
struct hl_debugfs_entry *entry;
int i;
dev_entry->hdev = hdev;
dev_entry->entry_arr = kmalloc_array(count,
@ -1661,131 +1785,14 @@ void hl_debugfs_add_device(struct hl_device *hdev)
spin_lock_init(&dev_entry->cs_spinlock);
spin_lock_init(&dev_entry->cs_job_spinlock);
spin_lock_init(&dev_entry->userptr_spinlock);
spin_lock_init(&dev_entry->ctx_mem_hash_spinlock);
mutex_init(&dev_entry->ctx_mem_hash_mutex);
dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
hl_debug_root);
debugfs_create_x64("memory_scrub_val",
0644,
dev_entry->root,
&hdev->memory_scrub_val);
debugfs_create_file("memory_scrub",
0200,
dev_entry->root,
dev_entry,
&hl_mem_scrub_fops);
debugfs_create_x64("addr",
0644,
dev_entry->root,
&dev_entry->addr);
debugfs_create_file("data32",
0644,
dev_entry->root,
dev_entry,
&hl_data32b_fops);
debugfs_create_file("data64",
0644,
dev_entry->root,
dev_entry,
&hl_data64b_fops);
debugfs_create_file("set_power_state",
0200,
dev_entry->root,
dev_entry,
&hl_power_fops);
debugfs_create_file("device",
0200,
dev_entry->root,
dev_entry,
&hl_device_fops);
debugfs_create_file("clk_gate",
0200,
dev_entry->root,
dev_entry,
&hl_clk_gate_fops);
debugfs_create_file("stop_on_err",
0644,
dev_entry->root,
dev_entry,
&hl_stop_on_err_fops);
debugfs_create_file("dump_security_violations",
0644,
dev_entry->root,
dev_entry,
&hl_security_violations_fops);
debugfs_create_file("dump_razwi_events",
0644,
dev_entry->root,
dev_entry,
&hl_razwi_check_fops);
debugfs_create_file("dma_size",
0200,
dev_entry->root,
dev_entry,
&hl_dma_size_fops);
debugfs_create_blob("data_dma",
0400,
dev_entry->root,
&dev_entry->data_dma_blob_desc);
debugfs_create_file("monitor_dump_trig",
0200,
dev_entry->root,
dev_entry,
&hl_monitor_dump_fops);
debugfs_create_blob("monitor_dump",
0400,
dev_entry->root,
&dev_entry->mon_dump_blob_desc);
debugfs_create_x8("skip_reset_on_timeout",
0644,
dev_entry->root,
&hdev->reset_info.skip_reset_on_timeout);
debugfs_create_file("state_dump",
0600,
dev_entry->root,
dev_entry,
&hl_state_dump_fops);
debugfs_create_file("timeout_locked",
0644,
dev_entry->root,
dev_entry,
&hl_timeout_locked_fops);
debugfs_create_u32("device_release_watchdog_timeout",
0644,
dev_entry->root,
&hdev->device_release_watchdog_timeout_sec);
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
debugfs_create_file(hl_debugfs_list[i].name,
0444,
dev_entry->root,
entry,
&hl_debugfs_fops);
entry->info_ent = &hl_debugfs_list[i];
entry->dev_entry = dev_entry;
}
add_files_to_device(hdev, dev_entry, dev_entry->root);
if (!hdev->asic_prop.fw_security_enabled)
add_secured_nodes(dev_entry);
add_secured_nodes(dev_entry, dev_entry->root);
}
void hl_debugfs_remove_device(struct hl_device *hdev)
@ -1795,6 +1802,7 @@ void hl_debugfs_remove_device(struct hl_device *hdev)
debugfs_remove_recursive(entry->root);
mutex_destroy(&entry->ctx_mem_hash_mutex);
mutex_destroy(&entry->file_mutex);
vfree(entry->data_dma_blob_desc.data);
@ -1901,18 +1909,18 @@ void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
spin_lock(&dev_entry->ctx_mem_hash_spinlock);
mutex_lock(&dev_entry->ctx_mem_hash_mutex);
list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
}
void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
spin_lock(&dev_entry->ctx_mem_hash_spinlock);
mutex_lock(&dev_entry->ctx_mem_hash_mutex);
list_del(&ctx->debugfs_list);
spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
}
/**


@ -43,36 +43,44 @@ static void dec_print_abnrm_intr_source(struct hl_device *hdev, u32 irq_status)
intr_source[2], intr_source[3], intr_source[4], intr_source[5]);
}
static void dec_error_intr_work(struct hl_device *hdev, u32 base_addr, u32 core_id)
static void dec_abnrm_intr_work(struct work_struct *work)
{
struct hl_dec *dec = container_of(work, struct hl_dec, abnrm_intr_work);
struct hl_device *hdev = dec->hdev;
u32 irq_status, event_mask = 0;
bool reset_required = false;
u32 irq_status;
irq_status = RREG32(base_addr + VCMD_IRQ_STATUS_OFFSET);
irq_status = RREG32(dec->base_addr + VCMD_IRQ_STATUS_OFFSET);
dev_err(hdev->dev, "Decoder abnormal interrupt %#x, core %d\n", irq_status, core_id);
dev_err(hdev->dev, "Decoder abnormal interrupt %#x, core %d\n", irq_status, dec->core_id);
dec_print_abnrm_intr_source(hdev, irq_status);
if (irq_status & VCMD_IRQ_STATUS_TIMEOUT_MASK)
reset_required = true;
/* Clear the interrupt */
WREG32(base_addr + VCMD_IRQ_STATUS_OFFSET, irq_status);
WREG32(dec->base_addr + VCMD_IRQ_STATUS_OFFSET, irq_status);
/* Flush the interrupt clear */
RREG32(base_addr + VCMD_IRQ_STATUS_OFFSET);
RREG32(dec->base_addr + VCMD_IRQ_STATUS_OFFSET);
if (reset_required)
hl_device_reset(hdev, HL_DRV_RESET_HARD);
}
if (irq_status & VCMD_IRQ_STATUS_TIMEOUT_MASK) {
reset_required = true;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
}
static void dec_completion_abnrm(struct work_struct *work)
{
struct hl_dec *dec = container_of(work, struct hl_dec, completion_abnrm_work);
struct hl_device *hdev = dec->hdev;
if (irq_status & VCMD_IRQ_STATUS_CMDERR_MASK)
event_mask |= HL_NOTIFIER_EVENT_UNDEFINED_OPCODE;
dec_error_intr_work(hdev, dec->base_addr, dec->core_id);
if (irq_status & (VCMD_IRQ_STATUS_ENDCMD_MASK |
VCMD_IRQ_STATUS_BUSERR_MASK |
VCMD_IRQ_STATUS_ABORT_MASK))
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
if (reset_required) {
event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
hl_device_cond_reset(hdev, 0, event_mask);
} else if (event_mask) {
hl_notifier_event_send_all(hdev, event_mask);
}
}
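With this rework, a VCMD timeout escalates to a conditional hard-reset carrying HL_NOTIFIER_EVENT_GENERAL_HW_ERR | HL_NOTIFIER_EVENT_DEVICE_RESET, while a command error alone only notifies listeners with HL_NOTIFIER_EVENT_UNDEFINED_OPCODE.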
void hl_dec_fini(struct hl_device *hdev)
@ -98,7 +106,7 @@ int hl_dec_init(struct hl_device *hdev)
dec = hdev->dec + j;
dec->hdev = hdev;
INIT_WORK(&dec->completion_abnrm_work, dec_completion_abnrm);
INIT_WORK(&dec->abnrm_intr_work, dec_abnrm_intr_work);
dec->core_id = j;
dec->base_addr = hdev->asic_funcs->get_dec_base_addr(hdev, j);
if (!dec->base_addr) {


@ -22,7 +22,6 @@
enum dma_alloc_type {
DMA_ALLOC_COHERENT,
DMA_ALLOC_CPU_ACCESSIBLE,
DMA_ALLOC_POOL,
};
@ -121,9 +120,6 @@ static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t
case DMA_ALLOC_COHERENT:
ptr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, size, dma_handle, flag);
break;
case DMA_ALLOC_CPU_ACCESSIBLE:
ptr = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
break;
case DMA_ALLOC_POOL:
ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, size, flag, dma_handle);
break;
@ -147,9 +143,6 @@ static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *c
case DMA_ALLOC_COHERENT:
hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle);
break;
case DMA_ALLOC_CPU_ACCESSIBLE:
hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, cpu_addr);
break;
case DMA_ALLOC_POOL:
hdev->asic_funcs->asic_dma_pool_free(hdev, cpu_addr, dma_handle);
break;
@ -170,18 +163,6 @@ void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void
hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT, caller);
}
void *hl_cpu_accessible_dma_pool_alloc_caller(struct hl_device *hdev, size_t size,
dma_addr_t *dma_handle, const char *caller)
{
return hl_dma_alloc_common(hdev, size, dma_handle, 0, DMA_ALLOC_CPU_ACCESSIBLE, caller);
}
void hl_cpu_accessible_dma_pool_free_caller(struct hl_device *hdev, size_t size, void *vaddr,
const char *caller)
{
hl_asic_dma_free_common(hdev, size, vaddr, 0, DMA_ALLOC_CPU_ACCESSIBLE, caller);
}
void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,
dma_addr_t *dma_handle, const char *caller)
{
@ -194,6 +175,16 @@ void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_
hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL, caller);
}
void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle)
{
return hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
}
void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)
{
hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, vaddr);
}
int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@ -389,18 +380,17 @@ bool hl_ctrl_device_operational(struct hl_device *hdev,
static void print_idle_status_mask(struct hl_device *hdev, const char *message,
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE])
{
u32 pad_width[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {};
BUILD_BUG_ON(HL_BUSY_ENGINES_MASK_EXT_SIZE != 4);
pad_width[3] = idle_mask[3] ? 16 : 0;
pad_width[2] = idle_mask[2] || pad_width[3] ? 16 : 0;
pad_width[1] = idle_mask[1] || pad_width[2] ? 16 : 0;
pad_width[0] = idle_mask[0] || pad_width[1] ? 16 : 0;
dev_err(hdev->dev, "%s (mask %0*llx_%0*llx_%0*llx_%0*llx)\n",
message, pad_width[3], idle_mask[3], pad_width[2], idle_mask[2],
pad_width[1], idle_mask[1], pad_width[0], idle_mask[0]);
if (idle_mask[3])
dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx_%016llx)\n",
message, idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]);
else if (idle_mask[2])
dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx)\n",
message, idle_mask[2], idle_mask[1], idle_mask[0]);
else if (idle_mask[1])
dev_err(hdev->dev, "%s (mask %#llx_%016llx)\n",
message, idle_mask[1], idle_mask[0]);
else
dev_err(hdev->dev, "%s (mask %#llx)\n", message, idle_mask[0]);
}
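The rework drops the computed pad widths and prints only the populated 64-bit words: idle_mask = { 0xdeadbeef, 0x1, 0, 0 } is now reported as "... (mask 0x1_00000000deadbeef)", and a single-word mask simply as "... (mask 0xdeadbeef)".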
static void hpriv_release(struct kref *ref)
@ -423,6 +413,9 @@ static void hpriv_release(struct kref *ref)
mutex_destroy(&hpriv->ctx_lock);
mutex_destroy(&hpriv->restore_phase_mutex);
/* There should be no memory buffers at this point and handles IDR can be destroyed */
hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);
/* Device should be reset if reset-upon-device-release is enabled, or if there is a pending
* reset that waits for device release.
*/
@ -492,6 +485,36 @@ int hl_hpriv_put(struct hl_fpriv *hpriv)
return kref_put(&hpriv->refcount, hpriv_release);
}
static void print_device_in_use_info(struct hl_device *hdev, const char *message)
{
u32 active_cs_num, dmabuf_export_cnt;
bool unknown_reason = true;
char buf[128];
size_t size;
int offset;
size = sizeof(buf);
offset = 0;
active_cs_num = hl_get_active_cs_num(hdev);
if (active_cs_num) {
unknown_reason = false;
offset += scnprintf(buf + offset, size - offset, " [%u active CS]", active_cs_num);
}
dmabuf_export_cnt = atomic_read(&hdev->dmabuf_export_cnt);
if (dmabuf_export_cnt) {
unknown_reason = false;
offset += scnprintf(buf + offset, size - offset, " [%u exported dma-buf]",
dmabuf_export_cnt);
}
if (unknown_reason)
scnprintf(buf + offset, size - offset, " [unknown reason]");
dev_notice(hdev->dev, "%s%s\n", message, buf);
}
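A release that leaves work outstanding therefore logs something like "User process closed FD but device still in use [2 active CS] [1 exported dma-buf]" instead of the previous bare notice.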
/*
* hl_device_release - release function for habanalabs device
*
@ -514,17 +537,20 @@ static int hl_device_release(struct inode *inode, struct file *filp)
}
hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
/* Memory buffers might still be in use at this point and thus the handles IDR destruction
* is postponed to hpriv_release().
*/
hl_mem_mgr_fini(&hpriv->mem_mgr);
hdev->compute_ctx_in_release = 1;
if (!hl_hpriv_put(hpriv)) {
dev_notice(hdev->dev, "User process closed FD but device still in use\n");
print_device_in_use_info(hdev, "User process closed FD but device still in use");
hl_device_reset(hdev, HL_DRV_RESET_HARD);
}
hdev->last_open_session_duration_jif =
jiffies - hdev->last_successful_open_jif;
hdev->last_open_session_duration_jif = jiffies - hdev->last_successful_open_jif;
return 0;
}
@ -617,7 +643,7 @@ static void device_release_func(struct device *dev)
* device_init_cdev - Initialize cdev and device for habanalabs device
*
* @hdev: pointer to habanalabs device structure
* @hclass: pointer to the class object of the device
* @class: pointer to the class object of the device
* @minor: minor number of the specific device
* @fpos: file operations to install for this device
* @name: name of the device as it will appear in the filesystem
@ -626,7 +652,7 @@ static void device_release_func(struct device *dev)
*
* Initialize a cdev and a Linux device for habanalabs's device.
*/
static int device_init_cdev(struct hl_device *hdev, struct class *hclass,
static int device_init_cdev(struct hl_device *hdev, struct class *class,
int minor, const struct file_operations *fops,
char *name, struct cdev *cdev,
struct device **dev)
@ -640,7 +666,7 @@ static int device_init_cdev(struct hl_device *hdev, struct class *hclass,
device_initialize(*dev);
(*dev)->devt = MKDEV(hdev->major, minor);
(*dev)->class = hclass;
(*dev)->class = class;
(*dev)->release = device_release_func;
dev_set_drvdata(*dev, hdev);
dev_set_name(*dev, "%s", name);
@ -733,14 +759,14 @@ static void device_hard_reset_pending(struct work_struct *work)
static void device_release_watchdog_func(struct work_struct *work)
{
struct hl_device_reset_work *device_release_watchdog_work =
container_of(work, struct hl_device_reset_work, reset_work.work);
struct hl_device *hdev = device_release_watchdog_work->hdev;
struct hl_device_reset_work *watchdog_work =
container_of(work, struct hl_device_reset_work, reset_work.work);
struct hl_device *hdev = watchdog_work->hdev;
u32 flags;
dev_dbg(hdev->dev, "Device wasn't released in time. Initiate device reset.\n");
dev_dbg(hdev->dev, "Device wasn't released in time. Initiate hard-reset.\n");
flags = device_release_watchdog_work->flags | HL_DRV_RESET_FROM_WD_THR;
flags = watchdog_work->flags | HL_DRV_RESET_HARD | HL_DRV_RESET_FROM_WD_THR;
hl_device_reset(hdev, flags);
}
@ -805,7 +831,7 @@ static int device_early_init(struct hl_device *hdev)
}
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
snprintf(workq_name, 32, "hl-free-jobs-%u", (u32) i);
snprintf(workq_name, 32, "hl%u-free-jobs-%u", hdev->cdev_idx, (u32) i);
hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
if (hdev->cq_wq[i] == NULL) {
dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
@ -814,14 +840,16 @@ static int device_early_init(struct hl_device *hdev)
}
}
hdev->eq_wq = create_singlethread_workqueue("hl-events");
snprintf(workq_name, 32, "hl%u-events", hdev->cdev_idx);
hdev->eq_wq = create_singlethread_workqueue(workq_name);
if (hdev->eq_wq == NULL) {
dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
rc = -ENOMEM;
goto free_cq_wq;
}
hdev->cs_cmplt_wq = alloc_workqueue("hl-cs-completions", WQ_UNBOUND, 0);
snprintf(workq_name, 32, "hl%u-cs-completions", hdev->cdev_idx);
hdev->cs_cmplt_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
if (!hdev->cs_cmplt_wq) {
dev_err(hdev->dev,
"Failed to allocate CS completions workqueue\n");
@ -829,7 +857,8 @@ static int device_early_init(struct hl_device *hdev)
goto free_eq_wq;
}
hdev->ts_free_obj_wq = alloc_workqueue("hl-ts-free-obj", WQ_UNBOUND, 0);
snprintf(workq_name, 32, "hl%u-ts-free-obj", hdev->cdev_idx);
hdev->ts_free_obj_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
if (!hdev->ts_free_obj_wq) {
dev_err(hdev->dev,
"Failed to allocate Timestamp registration free workqueue\n");
@ -837,15 +866,15 @@ static int device_early_init(struct hl_device *hdev)
goto free_cs_cmplt_wq;
}
hdev->prefetch_wq = alloc_workqueue("hl-prefetch", WQ_UNBOUND, 0);
snprintf(workq_name, 32, "hl%u-prefetch", hdev->cdev_idx);
hdev->prefetch_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
if (!hdev->prefetch_wq) {
dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n");
rc = -ENOMEM;
goto free_ts_free_wq;
}
hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
GFP_KERNEL);
hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info), GFP_KERNEL);
if (!hdev->hl_chip_info) {
rc = -ENOMEM;
goto free_prefetch_wq;
@ -857,7 +886,8 @@ static int device_early_init(struct hl_device *hdev)
hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);
hdev->reset_wq = create_singlethread_workqueue("hl_device_reset");
snprintf(workq_name, 32, "hl%u_device_reset", hdev->cdev_idx);
hdev->reset_wq = create_singlethread_workqueue(workq_name);
if (!hdev->reset_wq) {
rc = -ENOMEM;
dev_err(hdev->dev, "Failed to create device reset WQ\n");
@ -887,6 +917,7 @@ static int device_early_init(struct hl_device *hdev)
free_cb_mgr:
hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
free_chip_info:
kfree(hdev->hl_chip_info);
free_prefetch_wq:
@ -930,6 +961,7 @@ static void device_early_fini(struct hl_device *hdev)
mutex_destroy(&hdev->clk_throttling.lock);
hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
kfree(hdev->hl_chip_info);
@ -953,6 +985,8 @@ static void hl_device_heartbeat(struct work_struct *work)
{
struct hl_device *hdev = container_of(work, struct hl_device,
work_heartbeat.work);
struct hl_info_fw_err_info info = {0};
u64 event_mask = HL_NOTIFIER_EVENT_DEVICE_RESET | HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;
if (!hl_device_operational(hdev, NULL))
goto reschedule;
@ -963,7 +997,10 @@ static void hl_device_heartbeat(struct work_struct *work)
if (hl_device_operational(hdev, NULL))
dev_err(hdev->dev, "Device heartbeat failed!\n");
hl_device_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT);
info.err_type = HL_INFO_FW_HEARTBEAT_ERR;
info.event_mask = &event_mask;
hl_handle_fw_err(hdev, &info);
hl_device_cond_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT, event_mask);
return;
@ -1234,7 +1271,6 @@ int hl_device_resume(struct hl_device *hdev)
return 0;
disable_device:
pci_clear_master(hdev->pdev);
pci_disable_device(hdev->pdev);
return rc;
@ -1344,6 +1380,34 @@ static void device_disable_open_processes(struct hl_device *hdev, bool control_d
mutex_unlock(fd_lock);
}
static void send_disable_pci_access(struct hl_device *hdev, u32 flags)
{
/* If the reset is due to a heartbeat failure, the device CPU is not
* responsive, in which case there is no point in sending it the PCI
* disable message.
*/
if ((flags & HL_DRV_RESET_HARD) &&
!(flags & (HL_DRV_RESET_HEARTBEAT | HL_DRV_RESET_BYPASS_REQ_TO_FW))) {
/* Disable PCI access from the device F/W so it won't send
* us additional interrupts. We disable MSI/MSI-X at
* the halt_engines function and we can't have the F/W
* sending us interrupts after that. We need to disable
* the access here because if the device is marked
* disabled, the message won't be sent. Also, in the
* heartbeat case the device CPU is marked as disabled,
* so this message would not be sent anyway.
*/
if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0)) {
dev_warn(hdev->dev, "Failed to disable FW's PCI access\n");
return;
}
/* verify that last EQs are handled before disabled is set */
if (hdev->cpu_queues_enable)
synchronize_irq(pci_irq_vector(hdev->pdev,
hdev->asic_prop.eq_interrupt_id));
}
}
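For example, a heartbeat-driven hard-reset (HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT) skips the message entirely because the device CPU is assumed unresponsive, while an ordinary hard-reset sends CPUCP_PACKET_DISABLE_PCI_ACCESS and then synchronizes the EQ interrupt before the caller marks the device disabled.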
static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
{
u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
@ -1382,28 +1446,6 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
} else {
hdev->reset_info.reset_trigger_repeated = 1;
}
/* If reset is due to heartbeat, device CPU is no responsive in
* which case no point sending PCI disable message to it.
*
* If F/W is performing the reset, no need to send it a message to disable
* PCI access
*/
if ((flags & HL_DRV_RESET_HARD) &&
!(flags & (HL_DRV_RESET_HEARTBEAT | HL_DRV_RESET_BYPASS_REQ_TO_FW))) {
/* Disable PCI access from device F/W so he won't send
* us additional interrupts. We disable MSI/MSI-X at
* the halt_engines function and we can't have the F/W
* sending us interrupts after that. We need to disable
* the access here because if the device is marked
* disable, the message won't be send. Also, in case
* of heartbeat, the device CPU is marked as disable
* so this message won't be sent
*/
if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0))
dev_warn(hdev->dev,
"Failed to disable PCI access by F/W\n");
}
}
/*
@ -1424,12 +1466,11 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
*/
int hl_device_reset(struct hl_device *hdev, u32 flags)
{
bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false,
reset_upon_device_release = false, schedule_hard_reset = false,
delay_reset, from_dev_release, from_watchdog_thread;
bool hard_reset, from_hard_reset_thread, fw_reset, reset_upon_device_release,
schedule_hard_reset = false, delay_reset, from_dev_release, from_watchdog_thread;
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
struct hl_ctx *ctx;
int i, rc;
int i, rc, hw_fini_rc;
if (!hdev->init_done) {
dev_err(hdev->dev, "Can't reset before initialization is done\n");
@ -1442,6 +1483,7 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
from_dev_release = !!(flags & HL_DRV_RESET_DEV_RELEASE);
delay_reset = !!(flags & HL_DRV_RESET_DELAY);
from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);
reset_upon_device_release = hdev->reset_upon_device_release && from_dev_release;
if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) {
dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n");
@ -1449,30 +1491,26 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
}
if (!hard_reset && !hdev->asic_prop.supports_compute_reset) {
hard_instead_soft = true;
dev_dbg(hdev->dev, "asic doesn't support compute reset - do hard-reset instead\n");
hard_reset = true;
}
if (hdev->reset_upon_device_release && from_dev_release) {
if (reset_upon_device_release) {
if (hard_reset) {
dev_crit(hdev->dev,
"Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n");
return -EINVAL;
}
reset_upon_device_release = true;
goto do_reset;
}
if (!hard_reset && !hdev->asic_prop.allow_inference_soft_reset) {
hard_instead_soft = true;
dev_dbg(hdev->dev,
"asic doesn't allow inference soft reset - do hard-reset instead\n");
hard_reset = true;
}
if (hard_instead_soft)
dev_dbg(hdev->dev, "Doing hard-reset instead of compute reset\n");
do_reset:
/* Re-entry of reset thread */
if (from_hard_reset_thread && hdev->process_kill_trial_cnt)
@ -1480,14 +1518,14 @@ do_reset:
/*
* Prevent concurrency in this function - only one reset should be
* done at any given time. Only need to perform this if we didn't
* get from the dedicated hard reset thread
* done at any given time. We need to perform this only if we didn't
* get here from a dedicated hard reset thread.
*/
if (!from_hard_reset_thread) {
/* Block future CS/VM/JOB completion operations */
spin_lock(&hdev->reset_info.lock);
if (hdev->reset_info.in_reset) {
/* We only allow scheduling of a hard reset during compute reset */
/* We allow scheduling of a hard reset only during a compute reset */
if (hard_reset && hdev->reset_info.in_compute_reset)
hdev->reset_info.hard_reset_schedule_flags = flags;
spin_unlock(&hdev->reset_info.lock);
@ -1505,15 +1543,17 @@ do_reset:
/* Cancel the device release watchdog work if required.
* In case of reset-upon-device-release while the release watchdog work is
* scheduled, do hard-reset instead of compute-reset.
* scheduled due to a hard-reset, do hard-reset instead of compute-reset.
*/
if ((hard_reset || from_dev_release) && hdev->reset_info.watchdog_active) {
struct hl_device_reset_work *watchdog_work =
&hdev->device_release_watchdog_work;
hdev->reset_info.watchdog_active = 0;
if (!from_watchdog_thread)
cancel_delayed_work_sync(
&hdev->device_release_watchdog_work.reset_work);
cancel_delayed_work_sync(&watchdog_work->reset_work);
if (from_dev_release) {
if (from_dev_release && (watchdog_work->flags & HL_DRV_RESET_HARD)) {
hdev->reset_info.in_compute_reset = 0;
flags |= HL_DRV_RESET_HARD;
flags &= ~HL_DRV_RESET_DEV_RELEASE;
@ -1524,7 +1564,9 @@ do_reset:
if (delay_reset)
usleep_range(HL_RESET_DELAY_USEC, HL_RESET_DELAY_USEC << 1);
escalate_reset_flow:
handle_reset_trigger(hdev, flags);
send_disable_pci_access(hdev, flags);
/* This also blocks future CS/VM/JOB completion operations */
hdev->disabled = true;
@ -1539,7 +1581,6 @@ do_reset:
dev_dbg(hdev->dev, "Going to reset engines of inference device\n");
}
again:
if ((hard_reset) && (!from_hard_reset_thread)) {
hdev->reset_info.hard_reset_pending = true;
@ -1592,7 +1633,7 @@ kill_processes:
}
/* Reset the H/W. It will be in idle state after this returns */
hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);
hw_fini_rc = hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);
if (hard_reset) {
hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
@ -1619,6 +1660,10 @@ kill_processes:
hl_ctx_put(ctx);
}
if (hw_fini_rc) {
rc = hw_fini_rc;
goto out_err;
}
/* Finished tear-down, starting to re-initialize */
if (hard_reset) {
@ -1784,10 +1829,8 @@ kill_processes:
dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n");
flags = hdev->reset_info.hard_reset_schedule_flags;
hdev->reset_info.hard_reset_schedule_flags = 0;
hdev->disabled = true;
hard_reset = true;
handle_reset_trigger(hdev, flags);
goto again;
goto escalate_reset_flow;
}
}
@ -1804,20 +1847,19 @@ out_err:
"%s Failed to reset! Device is NOT usable\n",
dev_name(&(hdev)->pdev->dev));
hdev->reset_info.hard_reset_cnt++;
} else if (reset_upon_device_release) {
spin_unlock(&hdev->reset_info.lock);
dev_err(hdev->dev, "Failed to reset device after user release\n");
flags |= HL_DRV_RESET_HARD;
flags &= ~HL_DRV_RESET_DEV_RELEASE;
hard_reset = true;
goto again;
} else {
if (reset_upon_device_release) {
dev_err(hdev->dev, "Failed to reset device after user release\n");
flags &= ~HL_DRV_RESET_DEV_RELEASE;
} else {
dev_err(hdev->dev, "Failed to do compute reset\n");
hdev->reset_info.compute_reset_cnt++;
}
spin_unlock(&hdev->reset_info.lock);
dev_err(hdev->dev, "Failed to do compute reset\n");
hdev->reset_info.compute_reset_cnt++;
flags |= HL_DRV_RESET_HARD;
hard_reset = true;
goto again;
goto escalate_reset_flow;
}
hdev->reset_info.in_reset = 0;
@ -1840,10 +1882,6 @@ int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
{
struct hl_ctx *ctx = NULL;
/* Device release watchdog is only for hard reset */
if (!(flags & HL_DRV_RESET_HARD) && hdev->asic_prop.allow_inference_soft_reset)
goto device_reset;
/* F/W reset cannot be postponed */
if (flags & HL_DRV_RESET_BYPASS_REQ_TO_FW)
goto device_reset;
@ -1871,7 +1909,7 @@ int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
goto out;
hdev->device_release_watchdog_work.flags = flags;
dev_dbg(hdev->dev, "Device is going to be reset in %u sec unless being released\n",
dev_dbg(hdev->dev, "Device is going to be hard-reset in %u sec unless being released\n",
hdev->device_release_watchdog_timeout_sec);
schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work,
msecs_to_jiffies(hdev->device_release_watchdog_timeout_sec * 1000));
@ -1939,6 +1977,51 @@ void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask)
mutex_unlock(&hdev->fpriv_ctrl_list_lock);
}
static int create_cdev(struct hl_device *hdev)
{
char *name;
int rc;
hdev->cdev_idx = hdev->id / 2;
name = kasprintf(GFP_KERNEL, "hl%d", hdev->cdev_idx);
if (!name) {
rc = -ENOMEM;
goto out_err;
}
/* Initialize cdev and device structures */
rc = device_init_cdev(hdev, hdev->hclass, hdev->id, &hl_ops, name,
&hdev->cdev, &hdev->dev);
kfree(name);
if (rc)
goto out_err;
name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->cdev_idx);
if (!name) {
rc = -ENOMEM;
goto free_dev;
}
/* Initialize cdev and device structures for control device */
rc = device_init_cdev(hdev, hdev->hclass, hdev->id_control, &hl_ctrl_ops,
name, &hdev->cdev_ctrl, &hdev->dev_ctrl);
kfree(name);
if (rc)
goto free_dev;
return 0;
free_dev:
put_device(hdev->dev);
out_err:
return rc;
}
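With cdev_idx = id / 2, device id 2 therefore registers the char devices hl1 and hl_controlD1, typically exposed as /dev/hl1 and /dev/hl_controlD1.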
/*
* hl_device_init - main initialization function for habanalabs device
*
@ -1948,48 +2031,19 @@ void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask)
* ASIC specific initialization functions. Finally, create the cdev and the
* Linux device to expose it to the user
*/
int hl_device_init(struct hl_device *hdev, struct class *hclass)
int hl_device_init(struct hl_device *hdev)
{
int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;
char *name;
bool add_cdev_sysfs_on_err = false;
hdev->cdev_idx = hdev->id / 2;
name = kasprintf(GFP_KERNEL, "hl%d", hdev->cdev_idx);
if (!name) {
rc = -ENOMEM;
goto out_disabled;
}
/* Initialize cdev and device structures */
rc = device_init_cdev(hdev, hclass, hdev->id, &hl_ops, name,
&hdev->cdev, &hdev->dev);
kfree(name);
rc = create_cdev(hdev);
if (rc)
goto out_disabled;
name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->cdev_idx);
if (!name) {
rc = -ENOMEM;
goto free_dev;
}
/* Initialize cdev and device structures for control device */
rc = device_init_cdev(hdev, hclass, hdev->id_control, &hl_ctrl_ops,
name, &hdev->cdev_ctrl, &hdev->dev_ctrl);
kfree(name);
if (rc)
goto free_dev;
/* Initialize ASIC function pointers and perform early init */
rc = device_early_init(hdev);
if (rc)
goto free_dev_ctrl;
goto free_dev;
user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +
hdev->asic_prop.user_interrupt_count;
@ -2241,9 +2295,8 @@ free_usr_intr_mem:
kfree(hdev->user_interrupt);
early_fini:
device_early_fini(hdev);
free_dev_ctrl:
put_device(hdev->dev_ctrl);
free_dev:
put_device(hdev->dev_ctrl);
put_device(hdev->dev);
out_disabled:
hdev->disabled = true;
@ -2364,7 +2417,9 @@ void hl_device_fini(struct hl_device *hdev)
hl_cb_pool_fini(hdev);
/* Reset the H/W. It will be in idle state after this returns */
hdev->asic_funcs->hw_fini(hdev, true, false);
rc = hdev->asic_funcs->hw_fini(hdev, true, false);
if (rc)
dev_err(hdev->dev, "hw_fini failed in device fini while removing device %d\n", rc);
hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
@ -2566,3 +2621,49 @@ void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_
if (event_mask)
*event_mask |= HL_NOTIFIER_EVENT_PAGE_FAULT;
}
static void hl_capture_hw_err(struct hl_device *hdev, u16 event_id)
{
struct hw_err_info *info = &hdev->captured_err_info.hw_err;
/* Capture only the first HW err */
if (atomic_cmpxchg(&info->event_detected, 0, 1))
return;
info->event.timestamp = ktime_to_ns(ktime_get());
info->event.event_id = event_id;
info->event_info_available = true;
}
void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_mask)
{
hl_capture_hw_err(hdev, event_id);
if (event_mask)
*event_mask |= HL_NOTIFIER_EVENT_CRITICL_HW_ERR;
}
static void hl_capture_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *fw_info)
{
struct fw_err_info *info = &hdev->captured_err_info.fw_err;
/* Capture only the first FW error */
if (atomic_cmpxchg(&info->event_detected, 0, 1))
return;
info->event.timestamp = ktime_to_ns(ktime_get());
info->event.err_type = fw_info->err_type;
if (fw_info->err_type == HL_INFO_FW_REPORTED_ERR)
info->event.event_id = fw_info->event_id;
info->event_info_available = true;
}
void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info)
{
hl_capture_fw_err(hdev, info);
if (info->event_mask)
*info->event_mask |= HL_NOTIFIER_EVENT_CRITICL_FW_ERR;
}
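The reworked hl_device_heartbeat() earlier in this file is one such caller: it fills an hl_info_fw_err_info with HL_INFO_FW_HEARTBEAT_ERR, lets hl_handle_fw_err() record the event and raise HL_NOTIFIER_EVENT_CRITICL_FW_ERR, and only then requests the conditional hard-reset.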


@ -71,7 +71,7 @@ free_fw_ver:
return NULL;
}
static int extract_fw_sub_versions(struct hl_device *hdev, char *preboot_ver)
static int hl_get_preboot_major_minor(struct hl_device *hdev, char *preboot_ver)
{
char major[8], minor[8], *first_dot, *second_dot;
int rc;
@ -86,7 +86,7 @@ static int extract_fw_sub_versions(struct hl_device *hdev, char *preboot_ver)
if (rc) {
dev_err(hdev->dev, "Error %d parsing preboot major version\n", rc);
goto out;
return rc;
}
/* skip the first dot */
@ -102,9 +102,6 @@ static int extract_fw_sub_versions(struct hl_device *hdev, char *preboot_ver)
if (rc)
dev_err(hdev->dev, "Error %d parsing preboot minor version\n", rc);
out:
kfree(preboot_ver);
return rc;
}
@ -1263,7 +1260,7 @@ void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev)
COMMS_RST_DEV, 0, false,
hdev->fw_loader.cpu_timeout);
if (rc)
dev_warn(hdev->dev, "Failed sending COMMS_RST_DEV\n");
dev_err(hdev->dev, "Failed sending COMMS_RST_DEV\n");
} else {
WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_RST_DEV);
}
@ -1281,10 +1278,10 @@ void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev)
/* Stop device CPU to make sure nothing bad happens */
if (hdev->asic_prop.dynamic_fw_load) {
rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
COMMS_GOTO_WFE, 0, true,
COMMS_GOTO_WFE, 0, false,
hdev->fw_loader.cpu_timeout);
if (rc)
dev_warn(hdev->dev, "Failed sending COMMS_GOTO_WFE\n");
dev_err(hdev->dev, "Failed sending COMMS_GOTO_WFE\n");
} else {
WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_GOTO_WFE);
msleep(static_loader->cpu_reset_wait_msec);
@ -2181,8 +2178,8 @@ static int hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
dev_info(hdev->dev, "preboot version %s\n", preboot_ver);
/* This function takes care of freeing preboot_ver */
rc = extract_fw_sub_versions(hdev, preboot_ver);
rc = hl_get_preboot_major_minor(hdev, preboot_ver);
kfree(preboot_ver);
if (rc)
return rc;
}
@ -3152,7 +3149,7 @@ int hl_fw_get_sec_attest_info(struct hl_device *hdev, struct cpucp_sec_attest_in
int hl_fw_send_generic_request(struct hl_device *hdev, enum hl_passthrough_type sub_opcode,
dma_addr_t buff, u32 *size)
{
struct cpucp_packet pkt = {0};
struct cpucp_packet pkt = {};
u64 result;
int rc = 0;


@ -155,18 +155,12 @@ enum hl_mmu_enablement {
#define hl_asic_dma_alloc_coherent(hdev, size, dma_handle, flags) \
hl_asic_dma_alloc_coherent_caller(hdev, size, dma_handle, flags, __func__)
#define hl_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle) \
hl_cpu_accessible_dma_pool_alloc_caller(hdev, size, dma_handle, __func__)
#define hl_asic_dma_pool_zalloc(hdev, size, mem_flags, dma_handle) \
hl_asic_dma_pool_zalloc_caller(hdev, size, mem_flags, dma_handle, __func__)
#define hl_asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle) \
hl_asic_dma_free_coherent_caller(hdev, size, cpu_addr, dma_handle, __func__)
#define hl_cpu_accessible_dma_pool_free(hdev, size, vaddr) \
hl_cpu_accessible_dma_pool_free_caller(hdev, size, vaddr, __func__)
#define hl_asic_dma_pool_free(hdev, vaddr, dma_addr) \
hl_asic_dma_pool_free_caller(hdev, vaddr, dma_addr, __func__)
@ -378,6 +372,7 @@ enum hl_cs_type {
CS_RESERVE_SIGNALS,
CS_UNRESERVE_SIGNALS,
CS_TYPE_ENGINE_CORE,
CS_TYPE_ENGINES,
CS_TYPE_FLUSH_PCI_HBW_WRITES,
};
@ -592,6 +587,8 @@ struct hl_hints_range {
* @host_base_address: host physical start address for host DMA from device
* @host_end_address: host physical end address for host DMA from device
* @max_freq_value: current max clk frequency.
* @engine_core_interrupt_reg_addr: interrupt register address for engine core to use
* in order to raise events toward FW.
* @clk_pll_index: clock PLL index that specify which PLL determines the clock
* we display to the user
* @mmu_pgt_size: MMU page tables total size.
@ -612,8 +609,8 @@ struct hl_hints_range {
* @cb_pool_cb_cnt: number of CBs in the CB pool.
* @cb_pool_cb_size: size of each CB in the CB pool.
* @decoder_enabled_mask: which decoders are enabled.
* @decoder_binning_mask: which decoders are binned, 0 means usable and 1
* means binned (at most one binned decoder per dcore).
* @decoder_binning_mask: which decoders are binned, 0 means usable and 1 means binned.
* @rotator_enabled_mask: which rotators are enabled.
* @edma_enabled_mask: which EDMAs are enabled.
* @edma_binning_mask: which EDMAs are binned, 0 means usable and 1 means
* binned (at most one binned DMA).
@ -648,7 +645,8 @@ struct hl_hints_range {
* which the property supports_user_set_page_size is true
* (i.e. the DRAM supports multiple page sizes), otherwise
* it will shall be equal to dram_page_size.
* @num_engine_cores: number of engine cpu cores
* @num_engine_cores: number of engine cpu cores.
* @max_num_of_engines: maximum number of all engines in the ASIC.
* @num_of_special_blocks: special_blocks array size.
* @glbl_err_cause_num: global err cause number.
* @hbw_flush_reg: register to read to generate HBW flush. value of 0 means HBW flush is
@ -663,6 +661,8 @@ struct hl_hints_range {
* @first_available_cq: first available CQ for the user.
* @user_interrupt_count: number of user interrupts.
* @user_dec_intr_count: number of decoder interrupts exposed to user.
* @tpc_interrupt_id: interrupt id for TPC to use in order to raise events towards the host.
* @eq_interrupt_id: interrupt id for EQ, used to synchronize EQ interrupts in hard-reset.
* @cache_line_size: device cache line size.
* @server_type: Server type that the ASIC is currently installed in.
* The value is according to enum hl_server_type in uapi file.
@ -698,6 +698,7 @@ struct hl_hints_range {
* @supports_user_set_page_size: true if user can set the allocation page size.
* @dma_mask: the dma mask to be set for this device
* @supports_advanced_cpucp_rc: true if new cpucp opcodes are supported.
* @supports_engine_modes: true if changing engines/engine_cores modes is supported.
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@ -739,6 +740,7 @@ struct asic_fixed_properties {
u64 host_base_address;
u64 host_end_address;
u64 max_freq_value;
u64 engine_core_interrupt_reg_addr;
u32 clk_pll_index;
u32 mmu_pgt_size;
u32 mmu_pte_size;
@ -759,6 +761,7 @@ struct asic_fixed_properties {
u32 cb_pool_cb_size;
u32 decoder_enabled_mask;
u32 decoder_binning_mask;
u32 rotator_enabled_mask;
u32 edma_enabled_mask;
u32 edma_binning_mask;
u32 max_pending_cs;
@ -775,6 +778,7 @@ struct asic_fixed_properties {
u32 xbar_edge_enabled_mask;
u32 device_mem_alloc_default_page_size;
u32 num_engine_cores;
u32 max_num_of_engines;
u32 num_of_special_blocks;
u32 glbl_err_cause_num;
u32 hbw_flush_reg;
@ -788,6 +792,8 @@ struct asic_fixed_properties {
u16 first_available_cq[HL_MAX_DCORES];
u16 user_interrupt_count;
u16 user_dec_intr_count;
u16 tpc_interrupt_id;
u16 eq_interrupt_id;
u16 cache_line_size;
u16 server_type;
u8 completion_queues_count;
@ -811,6 +817,7 @@ struct asic_fixed_properties {
u8 supports_user_set_page_size;
u8 dma_mask;
u8 supports_advanced_cpucp_rc;
u8 supports_engine_modes;
};
/**
@ -1096,6 +1103,8 @@ struct hl_cq {
enum hl_user_interrupt_type {
HL_USR_INTERRUPT_CQ = 0,
HL_USR_INTERRUPT_DECODER,
HL_USR_INTERRUPT_TPC,
HL_USR_INTERRUPT_UNEXPECTED
};
/**
@ -1104,6 +1113,7 @@ enum hl_user_interrupt_type {
* @type: user interrupt type
* @wait_list_head: head to the list of user threads pending on this interrupt
* @wait_list_lock: protects wait_list_head
* @timestamp: last timestamp taken upon interrupt
* @interrupt_id: msix interrupt id
*/
struct hl_user_interrupt {
@ -1111,6 +1121,7 @@ struct hl_user_interrupt {
enum hl_user_interrupt_type type;
struct list_head wait_list_head;
spinlock_t wait_list_lock;
ktime_t timestamp;
u32 interrupt_id;
};
@ -1200,15 +1211,15 @@ struct hl_eq {
/**
* struct hl_dec - describes a decoder sw instance.
* @hdev: pointer to the device structure.
* @completion_abnrm_work: workqueue object to run when decoder generates an error interrupt
* @abnrm_intr_work: workqueue work item to run when decoder generates an error interrupt.
* @core_id: ID of the decoder.
* @base_addr: base address of the decoder.
*/
struct hl_dec {
struct hl_device *hdev;
struct work_struct completion_abnrm_work;
u32 core_id;
u32 base_addr;
struct hl_device *hdev;
struct work_struct abnrm_intr_work;
u32 core_id;
u32 base_addr;
};
/**
@ -1562,6 +1573,7 @@ struct engines_data {
* @access_dev_mem: access device memory
* @set_dram_bar_base: set the base of the DRAM BAR
* @set_engine_cores: set a config command to engine cores
* @set_engines: set a config command to user engines
* @send_device_activity: indication to FW about device availability
* @set_dram_properties: set DRAM related properties.
* @set_binning_masks: set binning/enable masks for all relevant components.
@ -1574,7 +1586,7 @@ struct hl_asic_funcs {
int (*sw_init)(struct hl_device *hdev);
int (*sw_fini)(struct hl_device *hdev);
int (*hw_init)(struct hl_device *hdev);
void (*hw_fini)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
int (*hw_fini)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
void (*halt_engines)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
int (*suspend)(struct hl_device *hdev);
int (*resume)(struct hl_device *hdev);
@ -1701,6 +1713,8 @@ struct hl_asic_funcs {
u64 (*set_dram_bar_base)(struct hl_device *hdev, u64 addr);
int (*set_engine_cores)(struct hl_device *hdev, u32 *core_ids,
u32 num_cores, u32 core_command);
int (*set_engines)(struct hl_device *hdev, u32 *engine_ids,
u32 num_engines, u32 engine_command);
int (*send_device_activity)(struct hl_device *hdev, bool open);
int (*set_dram_properties)(struct hl_device *hdev);
int (*set_binning_masks)(struct hl_device *hdev);
@ -1824,7 +1838,7 @@ struct hl_cs_outcome_store {
* @hpriv: pointer to the private (Kernel Driver) data of the process (fd).
* @hdev: pointer to the device structure.
* @refcount: reference counter for the context. Context is released only when
* this hits 0l. It is incremented on CS and CS_WAIT.
* this hits 0. It is incremented on CS and CS_WAIT.
* @cs_pending: array of hl fence objects representing pending CS.
* @outcome_store: storage data structure used to remember outcomes of completed
* command submissions for a long time after CS id wraparound.
@ -2318,7 +2332,7 @@ struct hl_debugfs_entry {
* @userptr_list: list of available userptrs (virtual memory chunk descriptor).
* @userptr_spinlock: protects userptr_list.
* @ctx_mem_hash_list: list of available contexts with MMU mappings.
* @ctx_mem_hash_spinlock: protects cb_list.
* @ctx_mem_hash_mutex: protects list of available contexts with MMU mappings.
* @data_dma_blob_desc: data DMA descriptor of blob.
* @mon_dump_blob_desc: monitor dump descriptor of blob.
* @state_dump: data of the system states in case of a bad cs.
@ -2349,7 +2363,7 @@ struct hl_dbg_device_entry {
struct list_head userptr_list;
spinlock_t userptr_spinlock;
struct list_head ctx_mem_hash_list;
spinlock_t ctx_mem_hash_spinlock;
struct mutex ctx_mem_hash_mutex;
struct debugfs_blob_wrapper data_dma_blob_desc;
struct debugfs_blob_wrapper mon_dump_blob_desc;
char *state_dump[HL_STATE_DUMP_HIST_LEN];
@ -2974,8 +2988,8 @@ struct cs_timeout_info {
* @cq_addr: the address of the current handled command buffer
* @cq_size: the size of the current handled command buffer
* @cb_addr_streams_len: num of streams - actual len of cb_addr_streams array.
* should be equal to 1 incase of undefined opcode
* in Upper-CP (specific stream) and equal to 4 incase
* should be equal to 1 in case of undefined opcode
* in Upper-CP (specific stream) and equal to 4 in case
* of undefined opcode in Lower-CP.
* @engine_id: engine-id that the error occurred on
* @stream_id: the stream id the error occurred on. In case the stream equals to
@ -3031,18 +3045,56 @@ struct razwi_info {
bool razwi_info_available;
};
/**
* struct hw_err_info - HW error information.
* @event: holds information on the event.
* @event_detected: if set as 1, then a HW event was discovered for the
* first time after the driver has finished booting-up.
* Currently we assume that only fatal events (that require hard-reset) are
* reported, so we don't care about the others that might follow it.
* Once changed to 1, it will remain that way.
* TODO: support multiple events.
* @event_info_available: indicates that a HW event info is now available.
*/
struct hw_err_info {
struct hl_info_hw_err_event event;
atomic_t event_detected;
bool event_info_available;
};
/**
* struct fw_err_info - FW error information.
* @event: holds information on the event.
* @event_detected: if set as 1, then a FW event was discovered for the
* first time after the driver has finished booting-up.
* Currently we assume that only fatal events (that require hard-reset) are
* reported, so we don't care about the others that might follow it.
* Once changed to 1, it will remain that way.
* TODO: support multiple events.
* @event_info_available: indicates that a FW event info is now available.
*/
struct fw_err_info {
struct hl_info_fw_err_event event;
atomic_t event_detected;
bool event_info_available;
};
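Both blocks above record only the first fatal event after boot-up. A minimal sketch of such a capture helper, assuming an atomic_cmpxchg() "first event wins" pattern; the actual hl_handle_critical_hw_err()/hl_handle_fw_err() implementations are not part of this hunk and may differ:

/*
 * Hypothetical helper, not taken from the driver; only the hw_err_info
 * field names above are real, the helper name and flow are illustrative.
 */
static void capture_hw_err_once(struct hw_err_info *info,
				const struct hl_info_hw_err_event *event)
{
	/* record only the first detected fatal event, drop any that follow */
	if (atomic_cmpxchg(&info->event_detected, 0, 1))
		return;

	info->event = *event;
	info->event_info_available = true;
}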
/**
* struct hl_error_info - holds information collected during an error.
* @cs_timeout: CS timeout error information.
* @razwi_info: RAZWI information.
* @undef_opcode: undefined opcode information.
* @page_fault_info: page fault information.
* @hw_err: (fatal) hardware error information.
* @fw_err: firmware error information.
*/
struct hl_error_info {
struct cs_timeout_info cs_timeout;
struct razwi_info razwi_info;
struct undefined_opcode_info undef_opcode;
struct page_fault_info page_fault_info;
struct hw_err_info hw_err;
struct fw_err_info fw_err;
};
/**
@ -3090,6 +3142,7 @@ struct hl_reset_info {
* (required only for PCI address match mode)
* @pcie_bar: array of available PCIe bars virtual addresses.
* @rmmio: configuration area address on SRAM.
* @hclass: pointer to the habanalabs class.
* @cdev: related char device.
* @cdev_ctrl: char device for control operations only (INFO IOCTL)
* @dev: related kernel basic device structure.
@ -3104,6 +3157,8 @@ struct hl_reset_info {
* @user_interrupt: array of hl_user_interrupt. upon the corresponding user
* interrupt, driver will monitor the list of fences
* registered to this interrupt.
* @tpc_interrupt: single TPC interrupt for all TPCs.
* @unexpected_error_interrupt: single interrupt for unexpected user error indication.
* @common_user_cq_interrupt: common user CQ interrupt for all user CQ interrupts.
* upon any user CQ interrupt, driver will monitor the
* list of fences registered to this common structure.
@ -3199,6 +3254,7 @@ struct hl_reset_info {
* drams are binned-out
* @tpc_binning: contains mask of tpc engines that is received from the f/w which indicates which
* tpc engines are binned-out
* @dmabuf_export_cnt: number of dma-buf exporting.
* @card_type: Various ASICs have several card types. This indicates the card
* type of the current device.
* @major: habanalabs kernel driver major.
@ -3253,6 +3309,8 @@ struct hl_reset_info {
* @supports_mmu_prefetch: true if prefetch is supported, otherwise false.
* @reset_upon_device_release: reset the device when the user closes the file descriptor of the
* device.
* @supports_ctx_switch: true if a ctx switch is required upon first submission.
* @support_preboot_binning: true if we support read binning info from preboot.
* @nic_ports_mask: Controls which NIC ports are enabled. Used only for testing.
* @fw_components: Controls which f/w components to load to the device. There are multiple f/w
* stages and sometimes we want to stop at a certain stage. Used only for testing.
@ -3266,14 +3324,13 @@ struct hl_reset_info {
* Used only for testing.
* @heartbeat: Controls if we want to enable the heartbeat mechanism vs. the f/w, which verifies
* that the f/w is always alive. Used only for testing.
* @supports_ctx_switch: true if a ctx switch is required upon first submission.
* @support_preboot_binning: true if we support read binning info from preboot.
*/
struct hl_device {
struct pci_dev *pdev;
u64 pcie_bar_phys[HL_PCI_NUM_BARS];
void __iomem *pcie_bar[HL_PCI_NUM_BARS];
void __iomem *rmmio;
struct class *hclass;
struct cdev cdev;
struct cdev cdev_ctrl;
struct device *dev;
@ -3286,6 +3343,8 @@ struct hl_device {
enum hl_asic_type asic_type;
struct hl_cq *completion_queue;
struct hl_user_interrupt *user_interrupt;
struct hl_user_interrupt tpc_interrupt;
struct hl_user_interrupt unexpected_error_interrupt;
struct hl_user_interrupt common_user_cq_interrupt;
struct hl_user_interrupt common_decoder_interrupt;
struct hl_cs **shadow_cs_queue;
@ -3369,7 +3428,7 @@ struct hl_device {
u64 fw_comms_poll_interval_usec;
u64 dram_binning;
u64 tpc_binning;
atomic_t dmabuf_export_cnt;
enum cpucp_card_types card_type;
u32 major;
u32 high_pll;
@ -3412,7 +3471,7 @@ struct hl_device {
u8 supports_ctx_switch;
u8 support_preboot_binning;
/* Parameters for bring-up */
/* Parameters for bring-up to be upstreamed */
u64 nic_ports_mask;
u64 fw_components;
u8 mmu_enable;
@ -3450,6 +3509,20 @@ struct hl_cs_encaps_sig_handle {
u32 count;
};
/**
* struct hl_info_fw_err_info - firmware error information structure
* @err_type: The type of error detected (or reported).
* @event_mask: Pointer to the event mask to be modified with the detected error flag
* (can be NULL)
* @event_id: The id of the event that reported the error
* (applicable when err_type is HL_INFO_FW_REPORTED_ERR).
*/
struct hl_info_fw_err_info {
enum hl_info_fw_err_type err_type;
u64 *event_mask;
u16 event_id;
};
/*
* IOCTLs
*/
@ -3474,6 +3547,10 @@ struct hl_ioctl_desc {
hl_ioctl_t *func;
};
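/* firmware release 1.9 and later report fw_major_version >= 42 (see check below) */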
static inline bool hl_is_fw_ver_below_1_9(struct hl_device *hdev)
{
return (hdev->fw_major_version < 42);
}
/*
* Kernel module functions that can be accessed by entire module
@ -3537,14 +3614,12 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
}
uint64_t hl_set_dram_bar_default(struct hl_device *hdev, u64 addr);
void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle);
void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr);
void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
gfp_t flag, const char *caller);
void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void *cpu_addr,
dma_addr_t dma_handle, const char *caller);
void *hl_cpu_accessible_dma_pool_alloc_caller(struct hl_device *hdev, size_t size,
dma_addr_t *dma_handle, const char *caller);
void hl_cpu_accessible_dma_pool_free_caller(struct hl_device *hdev, size_t size, void *vaddr,
const char *caller);
void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,
dma_addr_t *dma_handle, const char *caller);
void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,
@ -3591,7 +3666,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg);
irqreturn_t hl_irq_handler_eq(int irq, void *arg);
irqreturn_t hl_irq_handler_dec_abnrm(int irq, void *arg);
irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg);
irqreturn_t hl_irq_handler_default(int irq, void *arg);
irqreturn_t hl_irq_user_interrupt_thread_handler(int irq, void *arg);
u32 hl_cq_inc_ptr(u32 ptr);
int hl_asid_init(struct hl_device *hdev);
@ -3612,7 +3687,7 @@ int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);
int hl_device_init(struct hl_device *hdev, struct class *hclass);
int hl_device_init(struct hl_device *hdev);
void hl_device_fini(struct hl_device *hdev);
int hl_device_suspend(struct hl_device *hdev);
int hl_device_resume(struct hl_device *hdev);
@ -3662,6 +3737,7 @@ bool cs_needs_timeout(struct hl_cs *cs);
bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs);
struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq);
void hl_multi_cs_completion_init(struct hl_device *hdev);
u32 hl_get_active_cs_num(struct hl_device *hdev);
void goya_set_asic_funcs(struct hl_device *hdev);
void gaudi_set_asic_funcs(struct hl_device *hdev);
@ -3861,6 +3937,7 @@ const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type);
void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg);
void hl_mem_mgr_fini(struct hl_mem_mgr *mmg);
void hl_mem_mgr_idr_destroy(struct hl_mem_mgr *mmg);
int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
void *args);
struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg,
@ -3879,6 +3956,8 @@ void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_o
void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu);
void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu,
u64 *event_mask);
void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_mask);
void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info);
#ifdef CONFIG_DEBUG_FS


@ -12,7 +12,6 @@
#include "../include/hw_ip/pci/pci_general.h"
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/module.h>
#define CREATE_TRACE_POINTS
@ -221,12 +220,9 @@ int hl_device_open(struct inode *inode, struct file *filp)
hl_debugfs_add_file(hpriv);
memset(&hdev->captured_err_info, 0, sizeof(hdev->captured_err_info));
atomic_set(&hdev->captured_err_info.cs_timeout.write_enable, 1);
atomic_set(&hdev->captured_err_info.razwi_info.razwi_detected, 0);
atomic_set(&hdev->captured_err_info.page_fault_info.page_fault_detected, 0);
hdev->captured_err_info.undef_opcode.write_enable = true;
hdev->captured_err_info.razwi_info.razwi_info_available = false;
hdev->captured_err_info.page_fault_info.page_fault_info_available = false;
hdev->open_counter++;
hdev->last_successful_open_jif = jiffies;
@ -237,6 +233,7 @@ int hl_device_open(struct inode *inode, struct file *filp)
out_err:
mutex_unlock(&hdev->fpriv_list_lock);
hl_mem_mgr_fini(&hpriv->mem_mgr);
hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);
hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
filp->private_data = NULL;
mutex_destroy(&hpriv->ctx_lock);
@ -324,6 +321,7 @@ static void copy_kernel_module_params_to_device(struct hl_device *hdev)
hdev->asic_prop.fw_security_enabled = is_asic_secured(hdev->asic_type);
hdev->major = hl_major;
hdev->hclass = hl_class;
hdev->memory_scrub = memory_scrub;
hdev->reset_on_lockup = reset_on_lockup;
hdev->boot_error_status_mask = boot_error_status_mask;
@ -550,9 +548,7 @@ static int hl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, hdev);
pci_enable_pcie_error_reporting(pdev);
rc = hl_device_init(hdev, hl_class);
rc = hl_device_init(hdev);
if (rc) {
dev_err(&pdev->dev, "Fatal error during habanalabs device init\n");
rc = -ENODEV;
@ -562,7 +558,6 @@ static int hl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
disable_device:
pci_disable_pcie_error_reporting(pdev);
pci_set_drvdata(pdev, NULL);
destroy_hdev(hdev);
@ -585,7 +580,6 @@ static void hl_pci_remove(struct pci_dev *pdev)
return;
hl_device_fini(hdev);
pci_disable_pcie_error_reporting(pdev);
pci_set_drvdata(pdev, NULL);
destroy_hdev(hdev);
}


@ -102,11 +102,15 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.mme_master_slave_mode = prop->mme_master_slave_mode;
hw_ip.first_available_interrupt_id = prop->first_available_user_interrupt;
hw_ip.number_of_user_interrupts = prop->user_interrupt_count;
hw_ip.tpc_interrupt_id = prop->tpc_interrupt_id;
hw_ip.edma_enabled_mask = prop->edma_enabled_mask;
hw_ip.server_type = prop->server_type;
hw_ip.security_enabled = prop->fw_security_enabled;
hw_ip.revision_id = hdev->pdev->revision;
hw_ip.rotator_enabled_mask = prop->rotator_enabled_mask;
hw_ip.engine_core_interrupt_reg_addr = prop->engine_core_interrupt_reg_addr;
hw_ip.reserved_dram_size = dram_kmd_size;
return copy_to_user(out, &hw_ip,
min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
@ -830,6 +834,50 @@ static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
return copy_to_user(out, pgf_info->user_mappings, actual_size) ? -EFAULT : 0;
}
static int hw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
struct hl_device *hdev = hpriv->hdev;
u32 user_buf_size = args->return_size;
struct hw_err_info *info;
int rc;
if ((!user_buf_size) || (!user_buf))
return -EINVAL;
if (user_buf_size < sizeof(struct hl_info_hw_err_event))
return -ENOMEM;
info = &hdev->captured_err_info.hw_err;
if (!info->event_info_available)
return -ENOENT;
rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_hw_err_event));
return rc ? -EFAULT : 0;
}
static int fw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
struct hl_device *hdev = hpriv->hdev;
u32 user_buf_size = args->return_size;
struct fw_err_info *info;
int rc;
if ((!user_buf_size) || (!user_buf))
return -EINVAL;
if (user_buf_size < sizeof(struct hl_info_fw_err_event))
return -ENOMEM;
info = &hdev->captured_err_info.fw_err;
if (!info->event_info_available)
return -ENOENT;
rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_fw_err_event));
return rc ? -EFAULT : 0;
}
static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *info_args)
{
void __user *buff = (void __user *) (uintptr_t) info_args->return_pointer;
@ -950,6 +998,14 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_UNREGISTER_EVENTFD:
return eventfd_unregister(hpriv, args);
case HL_INFO_HW_ERR_EVENT:
return hw_err_info(hpriv, args);
case HL_INFO_FW_ERR_EVENT:
return fw_err_info(hpriv, args);
case HL_INFO_DRAM_USAGE:
return dram_usage_info(hpriv, args);
default:
break;
}
@ -962,10 +1018,6 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
}
switch (args->op) {
case HL_INFO_DRAM_USAGE:
rc = dram_usage_info(hpriv, args);
break;
case HL_INFO_HW_IDLE:
rc = hw_idle(hdev, args);
break;
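The two new INFO opcodes handled above are queried from userspace through the same return_pointer/return_size/op fields the handlers read. A rough userspace sketch; the uapi header location, device node path and exact hl_info_args layout are assumptions here, not taken from this diff:

/* Illustrative query of the new HL_INFO_HW_ERR_EVENT opcode. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/habanalabs_accel.h>	/* assumed location of the habanalabs uapi header */

static int read_hw_err_event(int fd, struct hl_info_hw_err_event *event)
{
	struct hl_info_args args;

	memset(&args, 0, sizeof(args));
	memset(event, 0, sizeof(*event));

	args.op = HL_INFO_HW_ERR_EVENT;
	args.return_pointer = (uint64_t)(uintptr_t)event;
	args.return_size = sizeof(*event);

	/* the driver returns -ENOENT (seen as errno) while no HW error has been captured */
	return ioctl(fd, HL_IOCTL_INFO, &args);
}

int main(void)
{
	struct hl_info_hw_err_event event;
	int fd = open("/dev/hl0", O_RDWR);	/* device node path assumed */

	if (fd < 0)
		return 1;
	if (!read_hw_err_event(fd, &event))
		printf("a critical HW error event was captured\n");
	close(fd);
	return 0;
}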


@ -280,7 +280,6 @@ static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interru
struct list_head *ts_reg_free_list_head = NULL;
struct timestamp_reg_work_obj *job;
bool reg_node_handle_fail = false;
ktime_t now = ktime_get();
int rc;
/* For registration nodes:
@ -303,13 +302,13 @@ static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interru
if (pend->ts_reg_info.buf) {
if (!reg_node_handle_fail) {
rc = handle_registration_node(hdev, pend,
&ts_reg_free_list_head, now);
&ts_reg_free_list_head, intr->timestamp);
if (rc)
reg_node_handle_fail = true;
}
} else {
/* Handle wait target value node */
pend->fence.timestamp = now;
pend->fence.timestamp = intr->timestamp;
complete_all(&pend->fence.completion);
}
}
@ -326,6 +325,26 @@ static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interru
}
}
static void handle_tpc_interrupt(struct hl_device *hdev)
{
u64 event_mask;
u32 flags;
event_mask = HL_NOTIFIER_EVENT_TPC_ASSERT |
HL_NOTIFIER_EVENT_USER_ENGINE_ERR |
HL_NOTIFIER_EVENT_DEVICE_RESET;
flags = HL_DRV_RESET_DELAY;
dev_err_ratelimited(hdev->dev, "Received TPC assert\n");
hl_device_cond_reset(hdev, flags, event_mask);
}
static void handle_unexpected_user_interrupt(struct hl_device *hdev)
{
dev_err_ratelimited(hdev->dev, "Received unexpected user error interrupt\n");
}
/**
* hl_irq_handler_user_interrupt - irq handler for user interrupts
*
@ -334,6 +353,23 @@ static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interru
*
*/
irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg)
{
struct hl_user_interrupt *user_int = arg;
user_int->timestamp = ktime_get();
return IRQ_WAKE_THREAD;
}
/**
* hl_irq_user_interrupt_thread_handler - irq thread handler for user interrupts.
* This function is invoked by threaded irq mechanism
*
* @irq: irq number
* @arg: pointer to user interrupt structure
*
*/
irqreturn_t hl_irq_user_interrupt_thread_handler(int irq, void *arg)
{
struct hl_user_interrupt *user_int = arg;
struct hl_device *hdev = user_int->hdev;
@ -351,6 +387,12 @@ irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg)
/* Handle decoder interrupt registered on this specific irq */
handle_user_interrupt(hdev, user_int);
break;
case HL_USR_INTERRUPT_TPC:
handle_tpc_interrupt(hdev);
break;
case HL_USR_INTERRUPT_UNEXPECTED:
handle_unexpected_user_interrupt(hdev);
break;
default:
break;
}
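hl_irq_handler_user_interrupt() is now a minimal hard handler (timestamp plus IRQ_WAKE_THREAD) with the heavy lifting moved to hl_irq_user_interrupt_thread_handler(). A sketch of how such a pair is typically wired up with request_threaded_irq(); the irq number, name and flags below are placeholders, and the driver's actual request sites are not part of this hunk:

/* Illustrative only: irq number, devname and flags are placeholders. */
static int request_user_interrupt_irq(struct hl_user_interrupt *user_int, int irq)
{
	return request_threaded_irq(irq,
			hl_irq_handler_user_interrupt,		/* hard handler: take timestamp, wake thread */
			hl_irq_user_interrupt_thread_handler,	/* thread handler: walk wait lists, complete fences */
			0, "hl-user-interrupt", user_int);
}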
@ -358,24 +400,6 @@ irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg)
return IRQ_HANDLED;
}
/**
* hl_irq_handler_default - default irq handler
*
* @irq: irq number
* @arg: pointer to user interrupt structure
*
*/
irqreturn_t hl_irq_handler_default(int irq, void *arg)
{
struct hl_user_interrupt *user_interrupt = arg;
struct hl_device *hdev = user_interrupt->hdev;
u32 interrupt_id = user_interrupt->interrupt_id;
dev_err(hdev->dev, "got invalid user interrupt %u", interrupt_id);
return IRQ_HANDLED;
}
/**
* hl_irq_handler_eq - irq handler for event queue
*
@ -391,8 +415,8 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
struct hl_eq_entry *eq_base;
struct hl_eqe_work *handle_eqe_work;
bool entry_ready;
u32 cur_eqe;
u16 cur_eqe_index;
u32 cur_eqe, ctl;
u16 cur_eqe_index, event_type;
eq_base = eq->kernel_address;
@ -405,11 +429,10 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
cur_eqe_index = FIELD_GET(EQ_CTL_INDEX_MASK, cur_eqe);
if ((hdev->event_queue.check_eqe_index) &&
(((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK)
!= cur_eqe_index)) {
(((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK) != cur_eqe_index)) {
dev_dbg(hdev->dev,
"EQE 0x%x in queue is ready but index does not match %d!=%d",
eq_base[eq->ci].hdr.ctl,
"EQE %#x in queue is ready but index does not match %d!=%d",
cur_eqe,
((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK),
cur_eqe_index);
break;
@ -426,7 +449,10 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
dma_rmb();
if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
dev_warn(hdev->dev, "Device disabled but received an EQ event\n");
ctl = le32_to_cpu(eq_entry->hdr.ctl);
event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK) >> EQ_CTL_EVENT_TYPE_SHIFT);
dev_warn(hdev->dev,
"Device disabled but received an EQ event (%u)\n", event_type);
goto skip_irq;
}
@ -463,7 +489,7 @@ irqreturn_t hl_irq_handler_dec_abnrm(int irq, void *arg)
{
struct hl_dec *dec = arg;
schedule_work(&dec->completion_abnrm_work);
schedule_work(&dec->abnrm_intr_work);
return IRQ_HANDLED;
}


@ -235,10 +235,8 @@ static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
}
rc = hl_pin_host_memory(hdev, addr, size, userptr);
if (rc) {
dev_err(hdev->dev, "Failed to pin host memory\n");
if (rc)
goto pin_err;
}
userptr->dma_mapped = true;
userptr->dir = DMA_BIDIRECTIONAL;
@ -607,6 +605,7 @@ static u64 get_va_block(struct hl_device *hdev,
bool is_align_pow_2 = is_power_of_2(va_range->page_size);
bool is_hint_dram_addr = hl_is_dram_va(hdev, hint_addr);
bool force_hint = flags & HL_MEM_FORCE_HINT;
int rc;
if (is_align_pow_2)
align_mask = ~((u64)va_block_align - 1);
@ -724,9 +723,13 @@ static u64 get_va_block(struct hl_device *hdev,
kfree(new_va_block);
}
if (add_prev)
add_va_block_locked(hdev, &va_range->list, prev_start,
prev_end);
if (add_prev) {
rc = add_va_block_locked(hdev, &va_range->list, prev_start, prev_end);
if (rc) {
reserved_valid_start = 0;
goto out;
}
}
print_va_list_locked(hdev, &va_range->list);
out:
@ -1097,10 +1100,8 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device
huge_page_size = hdev->asic_prop.pmmu_huge.page_size;
rc = dma_map_host_va(hdev, addr, size, &userptr);
if (rc) {
dev_err(hdev->dev, "failed to get userptr from va\n");
if (rc)
return rc;
}
rc = init_phys_pg_pack_from_userptr(ctx, userptr,
&phys_pg_pack, false);
@ -1270,6 +1271,18 @@ init_page_pack_err:
return rc;
}
/* Should be called while the context's mem_hash_lock is taken */
static struct hl_vm_hash_node *get_vm_hash_node_locked(struct hl_ctx *ctx, u64 vaddr)
{
struct hl_vm_hash_node *hnode;
hash_for_each_possible(ctx->mem_hash, hnode, node, vaddr)
if (vaddr == hnode->vaddr)
return hnode;
return NULL;
}
/**
* unmap_device_va() - unmap the given device virtual address.
* @ctx: pointer to the context structure.
@ -1285,10 +1298,10 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
{
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
u64 vaddr = args->unmap.device_virt_addr;
struct hl_vm_hash_node *hnode = NULL;
struct asic_fixed_properties *prop;
struct hl_device *hdev = ctx->hdev;
struct hl_userptr *userptr = NULL;
struct hl_vm_hash_node *hnode;
struct hl_va_range *va_range;
enum vm_type *vm_type;
bool is_userptr;
@ -1298,15 +1311,10 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
/* protect from double entrance */
mutex_lock(&ctx->mem_hash_lock);
hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
if (vaddr == hnode->vaddr)
break;
hnode = get_vm_hash_node_locked(ctx, vaddr);
if (!hnode) {
mutex_unlock(&ctx->mem_hash_lock);
dev_err(hdev->dev,
"unmap failed, no mem hnode for vaddr 0x%llx\n",
vaddr);
dev_err(hdev->dev, "unmap failed, no mem hnode for vaddr 0x%llx\n", vaddr);
return -EINVAL;
}
@ -1779,6 +1787,44 @@ static void hl_unmap_dmabuf(struct dma_buf_attachment *attachment,
kfree(sgt);
}
static struct hl_vm_hash_node *memhash_node_export_get(struct hl_ctx *ctx, u64 addr)
{
struct hl_device *hdev = ctx->hdev;
struct hl_vm_hash_node *hnode;
/* get the memory handle */
mutex_lock(&ctx->mem_hash_lock);
hnode = get_vm_hash_node_locked(ctx, addr);
if (!hnode) {
mutex_unlock(&ctx->mem_hash_lock);
dev_dbg(hdev->dev, "map address %#llx not found\n", addr);
return ERR_PTR(-EINVAL);
}
if (upper_32_bits(hnode->handle)) {
mutex_unlock(&ctx->mem_hash_lock);
dev_dbg(hdev->dev, "invalid handle %#llx for map address %#llx\n",
hnode->handle, addr);
return ERR_PTR(-EINVAL);
}
/*
* node found, increase export count so this memory cannot be unmapped
* and the hash node cannot be deleted.
*/
hnode->export_cnt++;
mutex_unlock(&ctx->mem_hash_lock);
return hnode;
}
static void memhash_node_export_put(struct hl_ctx *ctx, struct hl_vm_hash_node *hnode)
{
mutex_lock(&ctx->mem_hash_lock);
hnode->export_cnt--;
mutex_unlock(&ctx->mem_hash_lock);
}
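memhash_node_export_get() raises export_cnt so the mapping and its hash node stay alive while a dma-buf refers to them; the matching check sits on the unmap path, outside this hunk. A hypothetical sketch of that guard, with the error handling assumed:

/* Hypothetical guard on the unmap path; the real unmap_device_va() may differ. */
static int check_va_not_exported_locked(struct hl_device *hdev,
					struct hl_vm_hash_node *hnode, u64 vaddr)
{
	if (hnode->export_cnt) {
		dev_err(hdev->dev, "cannot unmap va %#llx, it is exported via dma-buf\n", vaddr);
		return -EBUSY;	/* errno choice is an assumption */
	}

	return 0;
}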
static void hl_release_dmabuf(struct dma_buf *dmabuf)
{
struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
@ -1789,13 +1835,15 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf)
ctx = hl_dmabuf->ctx;
if (hl_dmabuf->memhash_hnode) {
mutex_lock(&ctx->mem_hash_lock);
hl_dmabuf->memhash_hnode->export_cnt--;
mutex_unlock(&ctx->mem_hash_lock);
}
if (hl_dmabuf->memhash_hnode)
memhash_node_export_put(ctx, hl_dmabuf->memhash_hnode);
atomic_dec(&ctx->hdev->dmabuf_export_cnt);
hl_ctx_put(ctx);
/* Paired with get_file() in export_dmabuf() */
fput(ctx->hpriv->filp);
kfree(hl_dmabuf);
}
@ -1834,6 +1882,13 @@ static int export_dmabuf(struct hl_ctx *ctx,
hl_dmabuf->ctx = ctx;
hl_ctx_get(hl_dmabuf->ctx);
atomic_inc(&ctx->hdev->dmabuf_export_cnt);
/* Get compute device file to enforce release order, such that all exported dma-buf will be
* released first and only then the compute device.
* Paired with fput() in hl_release_dmabuf().
*/
get_file(ctx->hpriv->filp);
*dmabuf_fd = fd;
@ -1933,47 +1988,6 @@ static int validate_export_params(struct hl_device *hdev, u64 device_addr, u64 s
return 0;
}
static struct hl_vm_hash_node *memhash_node_export_get(struct hl_ctx *ctx, u64 addr)
{
struct hl_device *hdev = ctx->hdev;
struct hl_vm_hash_node *hnode;
/* get the memory handle */
mutex_lock(&ctx->mem_hash_lock);
hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)addr)
if (addr == hnode->vaddr)
break;
if (!hnode) {
mutex_unlock(&ctx->mem_hash_lock);
dev_dbg(hdev->dev, "map address %#llx not found\n", addr);
return ERR_PTR(-EINVAL);
}
if (upper_32_bits(hnode->handle)) {
mutex_unlock(&ctx->mem_hash_lock);
dev_dbg(hdev->dev, "invalid handle %#llx for map address %#llx\n",
hnode->handle, addr);
return ERR_PTR(-EINVAL);
}
/*
* node found, increase export count so this memory cannot be unmapped
* and the hash node cannot be deleted.
*/
hnode->export_cnt++;
mutex_unlock(&ctx->mem_hash_lock);
return hnode;
}
static void memhash_node_export_put(struct hl_ctx *ctx, struct hl_vm_hash_node *hnode)
{
mutex_lock(&ctx->mem_hash_lock);
hnode->export_cnt--;
mutex_unlock(&ctx->mem_hash_lock);
}
static struct hl_vm_phys_pg_pack *get_phys_pg_pack_from_hash_node(struct hl_device *hdev,
struct hl_vm_hash_node *hnode)
{
@ -2221,11 +2235,11 @@ static struct hl_mmap_mem_buf_behavior hl_ts_behavior = {
* allocate_timestamps_buffers() - allocate timestamps buffers
* This function will allocate ts buffer that will later on be mapped to the user
* in order to be able to read the timestamp.
* in additon it'll allocate an extra buffer for registration management.
* in addition it'll allocate an extra buffer for registration management.
* since we cannot fail during registration for out-of-memory situation, so
* we'll prepare a pool which will be used as user interrupt nodes and instead
* of dynamically allocating nodes while registration we'll pick the node from
* this pool. in addtion it'll add node to the mapping hash which will be used
* this pool. in addition it'll add node to the mapping hash which will be used
* to map user ts buffer to the internal kernel ts buffer.
* @hpriv: pointer to the private data of the fd
* @args: ioctl input


@ -275,7 +275,7 @@ int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
if (atomic_cmpxchg(&buf->mmap, 0, 1)) {
dev_err(mmg->dev,
"%s, Memory mmap failed, already mmaped to user\n",
"%s, Memory mmap failed, already mapped to user\n",
buf->behavior->topic);
rc = -EINVAL;
goto put_mem;
@ -341,8 +341,19 @@ void hl_mem_mgr_fini(struct hl_mem_mgr *mmg)
"%s: Buff handle %u for CTX is still alive\n",
topic, id);
}
}
/* TODO: can it happen that some buffer is still in use at this point? */
/**
* hl_mem_mgr_idr_destroy() - destroy memory manager IDR.
* @mmg: parent unified memory manager
*
* Destroy the memory manager IDR.
* Shall be called when IDR is empty and no memory buffers are in use.
*/
void hl_mem_mgr_idr_destroy(struct hl_mem_mgr *mmg)
{
if (!idr_is_empty(&mmg->handles))
dev_crit(mmg->dev, "memory manager IDR is destroyed while it is not empty!\n");
idr_destroy(&mmg->handles);
}


@ -540,8 +540,8 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
u32 page_off;
/*
* Bit arithmetics cannot be used for non power of two page
* sizes. In addition, since bit arithmetics is not used,
* Bit arithmetic cannot be used for non power of two page
* sizes. In addition, since bit arithmetic is not used,
* we cannot ignore dram base. All that shall be considered.
*/
@ -679,7 +679,9 @@ int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags)
rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags);
if (rc)
dev_err_ratelimited(hdev->dev, "MMU cache invalidation failed\n");
dev_err_ratelimited(hdev->dev,
"%s cache invalidation failed, rc=%d\n",
flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU", rc);
return rc;
}
@ -692,7 +694,9 @@ int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, is_hard, flags,
asid, va, size);
if (rc)
dev_err_ratelimited(hdev->dev, "MMU cache range invalidation failed\n");
dev_err_ratelimited(hdev->dev,
"%s cache range invalidation failed: va=%#llx, size=%llu, rc=%d",
flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU", va, size, rc);
return rc;
}
@ -757,7 +761,7 @@ u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
* @mmu_prop: MMU properties.
* @hop_idx: HOP index.
* @hop_addr: HOP address.
* @virt_addr: virtual address fro the translation.
* @virt_addr: virtual address for the translation.
*
* @return the matching PTE value on success, otherwise U64_MAX.
*/


@ -420,7 +420,6 @@ int hl_pci_init(struct hl_device *hdev)
unmap_pci_bars:
hl_pci_bars_unmap(hdev);
disable_device:
pci_clear_master(pdev);
pci_disable_device(pdev);
return rc;
@ -436,6 +435,5 @@ void hl_pci_fini(struct hl_device *hdev)
{
hl_pci_bars_unmap(hdev);
pci_clear_master(hdev->pdev);
pci_disable_device(hdev->pdev);
}


@ -502,7 +502,7 @@ free_glbl_sec:
int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
u32 num_instances, u32 instance_offset,
const u32 pb_blocks[], u32 blocks_array_size,
const struct range *regs_range_array, u32 regs_range_array_size)
const struct range *user_regs_range_array, u32 user_regs_range_array_size)
{
int i;
struct hl_block_glbl_sec *glbl_sec;
@ -514,8 +514,8 @@ int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
return -ENOMEM;
hl_secure_block(hdev, glbl_sec, blocks_array_size);
hl_unsecure_registers_range(hdev, regs_range_array,
regs_range_array_size, 0, pb_blocks, glbl_sec,
hl_unsecure_registers_range(hdev, user_regs_range_array,
user_regs_range_array_size, 0, pb_blocks, glbl_sec,
blocks_array_size);
/* Fill all blocks with the same configuration */


@ -10,7 +10,7 @@
#include <linux/io-64-nonatomic-lo-hi.h>
extern struct hl_device *hdev;
struct hl_device;
/* special blocks */
#define HL_MAX_NUM_OF_GLBL_ERR_CAUSE 10


@ -497,10 +497,14 @@ int hl_sysfs_init(struct hl_device *hdev)
if (rc) {
dev_err(hdev->dev,
"Failed to add groups to device, error %d\n", rc);
return rc;
goto remove_groups;
}
return 0;
remove_groups:
device_remove_groups(hdev->dev, hl_dev_attr_groups);
return rc;
}
void hl_sysfs_fini(struct hl_device *hdev)


@ -656,6 +656,7 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->cfg_size = CFG_SIZE;
prop->max_asid = MAX_ASID;
prop->num_of_events = GAUDI_EVENT_SIZE;
prop->max_num_of_engines = GAUDI_ENGINE_ID_SIZE;
prop->tpc_enabled_mask = TPC_ENABLED_MASK;
set_default_power_values(hdev);
@ -679,6 +680,10 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
(num_sync_stream_queues * HL_RSVD_MONS);
prop->first_available_user_interrupt = USHRT_MAX;
prop->tpc_interrupt_id = USHRT_MAX;
/* single msi */
prop->eq_interrupt_id = 0;
for (i = 0 ; i < HL_MAX_DCORES ; i++)
prop->first_available_cq[i] = USHRT_MAX;
@ -867,13 +872,18 @@ pci_init:
rc = hl_fw_read_preboot_status(hdev);
if (rc) {
if (hdev->reset_on_preboot_fail)
/* we are already on failure flow, so don't check if hw_fini fails. */
hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n");
hdev->asic_funcs->hw_fini(hdev, true, false);
rc = hdev->asic_funcs->hw_fini(hdev, true, false);
if (rc) {
dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc);
goto pci_fini;
}
}
return 0;
@ -2010,38 +2020,6 @@ static int gaudi_enable_msi_single(struct hl_device *hdev)
return rc;
}
static int gaudi_enable_msi_multi(struct hl_device *hdev)
{
int cq_cnt = hdev->asic_prop.completion_queues_count;
int rc, i, irq_cnt_init, irq;
for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
irq = gaudi_pci_irq_vector(hdev, i, false);
rc = request_irq(irq, hl_irq_handler_cq, 0, gaudi_irq_name[i],
&hdev->completion_queue[i]);
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
goto free_irqs;
}
}
irq = gaudi_pci_irq_vector(hdev, GAUDI_EVENT_QUEUE_MSI_IDX, true);
rc = request_irq(irq, hl_irq_handler_eq, 0, gaudi_irq_name[cq_cnt],
&hdev->event_queue);
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
goto free_irqs;
}
return 0;
free_irqs:
for (i = 0 ; i < irq_cnt_init ; i++)
free_irq(gaudi_pci_irq_vector(hdev, i, false),
&hdev->completion_queue[i]);
return rc;
}
static int gaudi_enable_msi(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
@ -2056,14 +2034,7 @@ static int gaudi_enable_msi(struct hl_device *hdev)
return rc;
}
if (rc < NUMBER_OF_INTERRUPTS) {
gaudi->multi_msi_mode = false;
rc = gaudi_enable_msi_single(hdev);
} else {
gaudi->multi_msi_mode = true;
rc = gaudi_enable_msi_multi(hdev);
}
rc = gaudi_enable_msi_single(hdev);
if (rc)
goto free_pci_irq_vectors;
@ -2079,47 +2050,23 @@ free_pci_irq_vectors:
static void gaudi_sync_irqs(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
int i, cq_cnt = hdev->asic_prop.completion_queues_count;
if (!(gaudi->hw_cap_initialized & HW_CAP_MSI))
return;
/* Wait for all pending IRQs to be finished */
if (gaudi->multi_msi_mode) {
for (i = 0 ; i < cq_cnt ; i++)
synchronize_irq(gaudi_pci_irq_vector(hdev, i, false));
synchronize_irq(gaudi_pci_irq_vector(hdev,
GAUDI_EVENT_QUEUE_MSI_IDX,
true));
} else {
synchronize_irq(gaudi_pci_irq_vector(hdev, 0, false));
}
synchronize_irq(gaudi_pci_irq_vector(hdev, 0, false));
}
static void gaudi_disable_msi(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
int i, irq, cq_cnt = hdev->asic_prop.completion_queues_count;
if (!(gaudi->hw_cap_initialized & HW_CAP_MSI))
return;
gaudi_sync_irqs(hdev);
if (gaudi->multi_msi_mode) {
irq = gaudi_pci_irq_vector(hdev, GAUDI_EVENT_QUEUE_MSI_IDX,
true);
free_irq(irq, &hdev->event_queue);
for (i = 0 ; i < cq_cnt ; i++) {
irq = gaudi_pci_irq_vector(hdev, i, false);
free_irq(irq, &hdev->completion_queue[i]);
}
} else {
free_irq(gaudi_pci_irq_vector(hdev, 0, false), hdev);
}
free_irq(gaudi_pci_irq_vector(hdev, 0, false), hdev);
pci_free_irq_vectors(hdev->pdev);
gaudi->hw_cap_initialized &= ~HW_CAP_MSI;
@ -3718,7 +3665,7 @@ static int gaudi_mmu_init(struct hl_device *hdev)
if (rc) {
dev_err(hdev->dev,
"failed to set hop0 addr for asid %d\n", i);
goto err;
return rc;
}
}
@ -3729,7 +3676,9 @@ static int gaudi_mmu_init(struct hl_device *hdev)
/* mem cache invalidation */
WREG32(mmSTLB_MEM_CACHE_INVALIDATION, 1);
hl_mmu_invalidate_cache(hdev, true, 0);
rc = hl_mmu_invalidate_cache(hdev, true, 0);
if (rc)
return rc;
WREG32(mmMMU_UP_MMU_ENABLE, 1);
WREG32(mmMMU_UP_SPI_MASK, 0xF);
@ -3745,9 +3694,6 @@ static int gaudi_mmu_init(struct hl_device *hdev)
gaudi->hw_cap_initialized |= HW_CAP_MMU;
return 0;
err:
return rc;
}
static int gaudi_load_firmware_to_device(struct hl_device *hdev)
@ -3915,11 +3861,7 @@ static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
WREG32(mmCPU_IF_PF_PQ_PI, 0);
if (gaudi->multi_msi_mode)
WREG32(mmCPU_IF_QUEUE_INIT, PQ_INIT_STATUS_READY_FOR_CP);
else
WREG32(mmCPU_IF_QUEUE_INIT,
PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI);
WREG32(mmCPU_IF_QUEUE_INIT, PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI);
irq_handler_offset = prop->gic_interrupts_enable ?
mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
@ -4068,7 +4010,7 @@ disable_queues:
return rc;
}
static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
static int gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
struct cpu_dyn_regs *dyn_regs =
&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
@ -4078,7 +4020,7 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset
if (!hard_reset) {
dev_err(hdev->dev, "GAUDI doesn't support soft-reset\n");
return;
return 0;
}
if (hdev->pldm) {
@ -4199,10 +4141,10 @@ skip_reset:
msleep(reset_timeout_ms);
status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
dev_err(hdev->dev,
"Timeout while waiting for device to reset 0x%x\n",
status);
if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK) {
dev_err(hdev->dev, "Timeout while waiting for device to reset 0x%x\n", status);
return -ETIMEDOUT;
}
if (gaudi) {
gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | HW_CAP_HBM |
@ -4215,6 +4157,7 @@ skip_reset:
hdev->device_cpu_is_halted = false;
}
return 0;
}
static int gaudi_suspend(struct hl_device *hdev)
@ -5595,7 +5538,6 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_add
u32 len, u32 original_len, u64 cq_addr, u32 cq_val,
u32 msi_vec, bool eb)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct packet_msg_prot *cq_pkt;
struct packet_nop *cq_padding;
u64 msi_addr;
@ -5625,12 +5567,7 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_add
tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
cq_pkt->ctl = cpu_to_le32(tmp);
cq_pkt->value = cpu_to_le32(1);
if (gaudi->multi_msi_mode)
msi_addr = mmPCIE_MSI_INTR_0 + msi_vec * 4;
else
msi_addr = mmPCIE_CORE_MSI_REQ;
msi_addr = hdev->pdev ? mmPCIE_CORE_MSI_REQ : mmPCIE_MSI_INTR_0 + msi_vec * 4;
cq_pkt->addr = cpu_to_le64(CFG_BASE + msi_addr);
}
@ -7297,7 +7234,7 @@ static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type, u64 *e
}
static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
bool razwi, u64 *event_mask)
bool check_razwi, u64 *event_mask)
{
bool is_read = false, is_write = false;
u16 engine_id[2], num_of_razwi_eng = 0;
@ -7316,7 +7253,7 @@ static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
event_type, desc);
if (razwi) {
if (check_razwi) {
gaudi_print_and_get_razwi_info(hdev, &engine_id[0], &engine_id[1], &is_read,
&is_write);
gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, event_mask);
@ -7333,8 +7270,9 @@ static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
num_of_razwi_eng = 1;
}
hl_handle_razwi(hdev, razwi_addr, engine_id, num_of_razwi_eng, razwi_flags,
event_mask);
if (razwi_flags)
hl_handle_razwi(hdev, razwi_addr, engine_id, num_of_razwi_eng,
razwi_flags, event_mask);
}
}
@ -7633,6 +7571,7 @@ static void gaudi_print_clk_change_info(struct hl_device *hdev, u16 event_type,
static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct hl_info_fw_err_info fw_err_info;
u64 data = le64_to_cpu(eq_entry->data[0]), event_mask = 0;
u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
u32 fw_fatal_err_flag = 0, flags = 0;
@ -7911,7 +7850,10 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_FW_ALIVE_S:
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
gaudi_print_fw_alive_info(hdev, &eq_entry->fw_alive);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
fw_err_info.err_type = HL_INFO_FW_REPORTED_ERR;
fw_err_info.event_id = event_type;
fw_err_info.event_mask = &event_mask;
hl_handle_fw_err(hdev, &fw_err_info);
goto reset_device;
default:
@ -7942,6 +7884,10 @@ reset_device:
}
if (reset_required) {
/* escalate general hw errors to critical/fatal error */
if (event_mask & HL_NOTIFIER_EVENT_GENERAL_HW_ERR)
hl_handle_critical_hw_err(hdev, event_type, &event_mask);
hl_device_cond_reset(hdev, flags, event_mask);
} else {
hl_fw_unmask_irq(hdev, event_type);
@ -8403,19 +8349,26 @@ static int gaudi_internal_cb_pool_init(struct hl_device *hdev,
}
mutex_lock(&hdev->mmu_lock);
rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base,
hdev->internal_cb_pool_dma_addr,
HOST_SPACE_INTERNAL_CB_SZ);
hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
mutex_unlock(&hdev->mmu_lock);
if (rc)
goto unreserve_internal_cb_pool;
rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
if (rc)
goto unmap_internal_cb_pool;
mutex_unlock(&hdev->mmu_lock);
return 0;
unmap_internal_cb_pool:
hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base,
HOST_SPACE_INTERNAL_CB_SZ);
unreserve_internal_cb_pool:
mutex_unlock(&hdev->mmu_lock);
hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base,
HOST_SPACE_INTERNAL_CB_SZ);
destroy_internal_cb_pool:


@ -28,20 +28,8 @@
#define NUMBER_OF_COLLECTIVE_QUEUES 12
#define NUMBER_OF_SOBS_IN_GRP 11
/*
* Number of MSI interrupts IDS:
* Each completion queue has 1 ID
* The event queue has 1 ID
*/
#define NUMBER_OF_INTERRUPTS (NUMBER_OF_CMPLT_QUEUES + \
NUMBER_OF_CPU_HW_QUEUES)
#define GAUDI_STREAM_MASTER_ARR_SIZE 8
#if (NUMBER_OF_INTERRUPTS > GAUDI_MSI_ENTRIES)
#error "Number of MSI interrupts must be smaller or equal to GAUDI_MSI_ENTRIES"
#endif
#define CORESIGHT_TIMEOUT_USEC 100000 /* 100 ms */
#define GAUDI_MAX_CLK_FREQ 2200000000ull /* 2200 MHz */
@ -324,8 +312,6 @@ struct gaudi_internal_qman_info {
* signal we can use this engine in later code paths.
* Each bit is cleared upon reset of its corresponding H/W
* engine.
* @multi_msi_mode: whether we are working in multi MSI single MSI mode.
* Multi MSI is possible only with IOMMU enabled.
* @mmu_cache_inv_pi: PI for MMU cache invalidation flow. The H/W expects an
* 8-bit value so use u8.
*/
@ -345,7 +331,6 @@ struct gaudi_device {
u32 events_stat[GAUDI_EVENT_SIZE];
u32 events_stat_aggregate[GAUDI_EVENT_SIZE];
u32 hw_cap_initialized;
u8 multi_msi_mode;
u8 mmu_cache_inv_pi;
};

File diff suppressed because it is too large


@ -240,6 +240,8 @@
#define GAUDI2_SOB_INCREMENT_BY_ONE (FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 1) | \
FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1))
#define GAUDI2_NUM_TESTED_QS (GAUDI2_QUEUE_ID_CPU_PQ - GAUDI2_QUEUE_ID_PDMA_0_0)
#define GAUDI2_NUM_OF_GLBL_ERR_CAUSE 8
enum gaudi2_reserved_sob_id {
@ -387,6 +389,8 @@ enum gaudi2_edma_id {
* We have 64 CQ's per dcore, CQ0 in dcore 0 is reserved for legacy mode
*/
#define GAUDI2_NUM_USER_INTERRUPTS 255
#define GAUDI2_NUM_RESERVED_INTERRUPTS 1
#define GAUDI2_TOTAL_USER_INTERRUPTS (GAUDI2_NUM_USER_INTERRUPTS + GAUDI2_NUM_RESERVED_INTERRUPTS)
enum gaudi2_irq_num {
GAUDI2_IRQ_NUM_EVENT_QUEUE = GAUDI2_EVENT_QUEUE_MSIX_IDX,
@ -410,12 +414,15 @@ enum gaudi2_irq_num {
GAUDI2_IRQ_NUM_SHARED_DEC0_ABNRM,
GAUDI2_IRQ_NUM_SHARED_DEC1_NRM,
GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM,
GAUDI2_IRQ_NUM_DEC_LAST = GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM,
GAUDI2_IRQ_NUM_COMPLETION,
GAUDI2_IRQ_NUM_NIC_PORT_FIRST,
GAUDI2_IRQ_NUM_NIC_PORT_LAST = (GAUDI2_IRQ_NUM_NIC_PORT_FIRST + NIC_NUMBER_OF_PORTS - 1),
GAUDI2_IRQ_NUM_TPC_ASSERT,
GAUDI2_IRQ_NUM_RESERVED_FIRST,
GAUDI2_IRQ_NUM_RESERVED_LAST = (GAUDI2_MSIX_ENTRIES - GAUDI2_NUM_USER_INTERRUPTS - 1),
GAUDI2_IRQ_NUM_USER_FIRST,
GAUDI2_IRQ_NUM_RESERVED_LAST = (GAUDI2_MSIX_ENTRIES - GAUDI2_TOTAL_USER_INTERRUPTS - 1),
GAUDI2_IRQ_NUM_UNEXPECTED_ERROR = RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT,
GAUDI2_IRQ_NUM_USER_FIRST = GAUDI2_IRQ_NUM_UNEXPECTED_ERROR + 1,
GAUDI2_IRQ_NUM_USER_LAST = (GAUDI2_IRQ_NUM_USER_FIRST + GAUDI2_NUM_USER_INTERRUPTS - 1),
GAUDI2_IRQ_NUM_LAST = (GAUDI2_MSIX_ENTRIES - 1)
};
@ -447,6 +454,17 @@ struct dup_block_ctx {
unsigned int instances;
};
/**
* struct gaudi2_queues_test_info - Holds the address of the messages used for testing the
* device queues.
* @dma_addr: the address used by the HW for accessing the message.
* @kern_addr: The address used by the driver for accessing the message.
*/
struct gaudi2_queues_test_info {
dma_addr_t dma_addr;
void *kern_addr;
};
/**
* struct gaudi2_device - ASIC specific manage structure.
* @cpucp_info_get: get information on device from CPU-CP
@ -505,6 +523,7 @@ struct dup_block_ctx {
* @flush_db_fifo: flag to force flush DB FIFO after a write.
* @hbm_cfg: HBM subsystem settings
* @hw_queues_lock_mutex: used by simulator instead of hw_queues_lock.
* @queues_test_info: information used by the driver when testing the HW queues.
*/
struct gaudi2_device {
int (*cpucp_info_get)(struct hl_device *hdev);
@ -532,6 +551,9 @@ struct gaudi2_device {
u32 events_stat[GAUDI2_EVENT_SIZE];
u32 events_stat_aggregate[GAUDI2_EVENT_SIZE];
u32 num_of_valid_hw_events;
/* Queue testing */
struct gaudi2_queues_test_info queues_test_info[GAUDI2_NUM_TESTED_QS];
};
/*

Some files were not shown because too many files have changed in this diff